Separate from ion-sfu (#171)

* Separate from ion-sfu

changes:
1. extract pkg/buffer, twcc, sfu, relay, stats, logger

2. to solve the import cycle, move ion-sfu/pkg/logger to pkg/sfu/logger

3. replace pion/ion-sfu => ./
reason: imports of pion/ion-sfu/pkg/* will be changed to livekit-server/pkg/*
after this PR is merged. No code is changed in this PR on purpose, because
mixing changes with the code separated from ion-sfu would be confusing to review.

* Move code from ion-sfu to pkg/sfu

* fix build errors from resolving conflicts

Co-authored-by: cnderrauber <zengjie9004@gmail.com>
This commit is contained in:
cnderrauber
2021-11-09 12:03:16 +08:00
committed by GitHub
parent 289ebd32ff
commit 1e1aaeb86b
58 changed files with 12210 additions and 384 deletions

15
go.mod
View File

@@ -5,44 +5,43 @@ go 1.15
require (
github.com/bep/debounce v1.2.0
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/gammazero/deque v0.1.0
github.com/gammazero/workerpool v1.1.2
github.com/go-logr/logr v1.2.0
github.com/go-logr/zapr v1.1.0
github.com/go-logr/zerologr v1.2.0
github.com/go-redis/redis/v8 v8.11.3
github.com/google/subcommands v1.2.0 // indirect
github.com/google/wire v0.5.0
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/golang-lru v0.5.4
github.com/jxskiss/base62 v0.0.0-20191017122030-4f11678b909b
github.com/livekit/protocol v0.10.1-0.20211109000312-b3847c8d35ff
github.com/lucsky/cuid v1.2.1
github.com/magefile/mage v1.11.0
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0
github.com/mitchellh/go-homedir v1.1.0
github.com/olekukonko/tablewriter v0.0.5
github.com/pion/dtls/v2 v2.0.10
github.com/pion/ice/v2 v2.1.12
github.com/pion/interceptor v0.1.0
github.com/pion/ion-sfu v1.10.5
github.com/pion/logging v0.2.2
github.com/pion/rtcp v1.2.8
github.com/pion/rtp v1.7.2
github.com/pion/sdp/v3 v3.0.4
github.com/pion/stun v0.3.5
github.com/pion/transport v0.12.3
github.com/pion/turn/v2 v2.0.5
github.com/pion/webrtc/v3 v3.1.5
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/rs/zerolog v1.25.0
github.com/stretchr/testify v1.7.0
github.com/thoas/go-funk v0.8.0
github.com/twitchtv/twirp v8.1.0+incompatible
github.com/urfave/cli/v2 v2.3.0
github.com/urfave/negroni v1.0.0
go.uber.org/zap v1.19.1
golang.org/x/mod v0.5.1 // indirect
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect
google.golang.org/grpc v1.42.0
google.golang.org/protobuf v1.27.1
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
)
replace github.com/pion/ion-sfu => github.com/livekit/ion-sfu v1.20.16

388
go.sum
View File

@@ -1,38 +1,12 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A=
github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU=
github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -41,61 +15,36 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bep/debounce v1.2.0 h1:wXds8Kq8qRfwAOpAxHrJDbCXgC5aHSzgQb/0gKsHQqo=
github.com/bep/debounce v1.2.0/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8 h1:SjZ2GvvOononHOpK84APFuMvxqsk3tEIaKH/z4Rpu3g=
github.com/c9s/goprocinfo v0.0.0-20210130143923-c95fcf8c64a8/go.mod h1:uEyr4WpAH4hio6LFriaPkL938XnrvLpNPmQHBdrmbIE=
github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ=
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d h1:U+s90UTSYgptZMwQh2aRr3LuazLJIa+Pg3Kc1ylSYVY=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/channels v1.1.0 h1:F1taHcn7/F0i8DYqKXJnyhJcVpp2kgFcNePxXtnyu4k=
github.com/eapache/channels v1.1.0/go.mod h1:jMm2qB5Ubtg9zLd+inMZd2/NUvXgzmWXsDaLyQIGfH0=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
@@ -104,10 +53,8 @@ github.com/gammazero/deque v0.1.0/go.mod h1:KQw7vFau1hHuM8xmI9RbgKFbAsQFWmBpqQ2K
github.com/gammazero/workerpool v1.1.2 h1:vuioDQbgrz4HoaCi2q1HLlOXdpbap5AET7xu5/qj87g=
github.com/gammazero/workerpool v1.1.2/go.mod h1:UelbXcO0zCIGFcufcirHhq2/xtLXJdQ29qZNlXG9OjQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
@@ -121,21 +68,12 @@ github.com/go-logr/zerologr v1.2.0 h1:oS1fjSSEHwpv8Lam3SNmPTLTUw6V4DoB2ZzryqrkMB
github.com/go-logr/zerologr v1.2.0/go.mod h1:O82obOiXzyxiBNgAMRT1m+XbOvY8K18Kf6XhT52oqoc=
github.com/go-redis/redis/v8 v8.11.3 h1:GCjoYp8c+yQTJfc0n69iwSiHjvuAdruxl7elnZCxgt8=
github.com/go-redis/redis/v8 v8.11.3/go.mod h1:xNJ9xDG09FsIPwh3bWdk+0oDWHbtF9rPN0F/oD9XeKc=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -151,9 +89,6 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -164,81 +99,28 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/subcommands v1.0.1 h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE=
github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.5.0 h1:I7ELFeVBr3yfPIcc8+MWvrjk+3VjbcSzoXm3JVa+jD8=
github.com/google/wire v0.5.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
github.com/improbable-eng/grpc-web v0.13.0/go.mod h1:6hRR09jOEG81ADP5wCQju1z71g6OL4eEvELdran/3cs=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jxskiss/base62 v0.0.0-20191017122030-4f11678b909b h1:XUr8tvMEILhphQPp3TFcIudb5KTOzFeD0pJyDn5+5QI=
github.com/jxskiss/base62 v0.0.0-20191017122030-4f11678b909b/go.mod h1:a5Mn24iYVJRUQSkFupGByqykzD+k+wFI8J91zGHuPf8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -246,93 +128,46 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
github.com/lithammer/shortuuid/v3 v3.0.6 h1:pr15YQyvhiSX/qPxncFtqk+v4xLEpOZObbsY/mKrcvA=
github.com/lithammer/shortuuid/v3 v3.0.6/go.mod h1:vMk8ke37EmiewwolSO1NLW8vP4ZaKlRuDIi8tWWmAts=
github.com/livekit/ion-sfu v1.20.16 h1:B4+z0sf4t3zZSXFIwHive8malNn6Vje+7b1OW4ETDOM=
github.com/livekit/ion-sfu v1.20.16/go.mod h1:sUjL3tZRROs3NjCm6ZLT+IsisdYVRtxfq4OhVFHVd/A=
github.com/livekit/protocol v0.10.1-0.20211109000312-b3847c8d35ff h1:21SZ2sh5e7ELCVdXT01hlpdSZyNlwDv6KTOlcplBrQ8=
github.com/livekit/protocol v0.10.1-0.20211109000312-b3847c8d35ff/go.mod h1:7ir9zSlgnrPQoGGNv4f8U/c9QrWh+ogC9B5xVbJNedM=
github.com/lucsky/cuid v1.0.2 h1:z4XlExeoderxoPj2/dxKOyPxe9RCOu7yNq9/XWxIUMQ=
github.com/lucsky/cuid v1.0.2/go.mod h1:QaaJqckboimOmhRSJXSx/+IT+VTfxfPGSo/6mfgUfmE=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/lucsky/cuid v1.2.1 h1:MtJrL2OFhvYufUIn48d35QGXyeTC8tn0upumW9WwTHg=
github.com/lucsky/cuid v1.2.1/go.mod h1:QaaJqckboimOmhRSJXSx/+IT+VTfxfPGSo/6mfgUfmE=
github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls=
github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0 h1:8E6DrFvII6QR4eJ3PkFvV+lc03P+2qwqTPLm1ax7694=
github.com/maxbrunsfeld/counterfeiter/v6 v6.3.0/go.mod h1:fcEyUyXZXoV4Abw8DX0t7wyL8mCDxXyU4iAFZfT3IHw=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg=
github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pion/datachannel v1.4.21 h1:3ZvhNyfmxsAqltQrApLPQMhSFNA+aT87RqyCq4OXmf0=
github.com/pion/datachannel v1.4.21/go.mod h1:oiNyP4gHx2DIwRzX/MFyH0Rz/Gz05OgBlayAI2hAWjg=
github.com/pion/dtls/v2 v2.0.9/go.mod h1:O0Wr7si/Zj5/EBFlDzDd6UtVxx25CE1r7XM7BQKYQho=
@@ -377,82 +212,41 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s=
github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II=
github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI=
github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE=
github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo=
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/sclevine/spec v1.4.0 h1:z/Q9idDcay5m5irkZ28M7PtQM4aOISzOpj4bUPkDee8=
github.com/sclevine/spec v1.4.0/go.mod h1:LvpgJaFyvQzRvc1kaDs0bulYwzC70PbiYjC4QnFHkOM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
github.com/sourcegraph/jsonrpc2 v0.0.0-20210201082850-366fbb520750/go.mod h1:ZafdZgk/axhT1cvZAPOhw+95nz2I/Ra5qMlU4gTRwIo=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
@@ -462,56 +256,29 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/thoas/go-funk v0.8.0 h1:JP9tKSvnpFVclYgDM0Is7FD9M4fhPvqA0s0BsXmzSRQ=
github.com/thoas/go-funk v0.8.0/go.mod h1:+IWnUfUmFO1+WVYQWQtIJHeRRdaIyyYglZN7xzUPe4Q=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/twitchtv/twirp v8.1.0+incompatible h1:KGXanpa9LXdVE/V5P/tA27rkKFmXRGCtSNT7zdeeVOY=
github.com/twitchtv/twirp v8.1.0+incompatible/go.mod h1:RRJoFSAmTEh2weEqWtpPE3vFK5YBhA6bqp2l1kfCC5A=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.22.1 h1:+mkCCcOFKPnCmVYVcURKps1Xe+3zP90gSYGNfRkjoIY=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/urfave/cli/v2 v2.3.0 h1:qph92Y649prgesehzOrQjdWyxFOp/QVM+6imKHad91M=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723 h1:sHOAIxRGBp443oHZIPB+HsUGaksVCXVQENPxwTfQdH4=
go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI=
go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
@@ -519,49 +286,27 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201026091529-146b70c837a4/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@@ -577,142 +322,77 @@ golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5R
golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1 h1:SrN+KX8Art/Sf4HNj6Zcz06G7VEz+7w9tdXTPOZ7+l4=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98 h1:LCO0fg4kb6WwkXQXRQQgUYsFeFb5taTX5WAx5O/Vt28=
google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.35.0 h1:TwIQcH3es+MojMVojxxfQ3l3OF2KzlRxML2xZq0kRo8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc/examples v0.0.0-20201209011439-fd32f6a4fefe/go.mod h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -721,7 +401,6 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
@@ -733,19 +412,11 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U=
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.5.1 h1:7odma5RETjNHWJnR32wx8t+Io4djHE1PqxCFx3iiZ2w=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -758,12 +429,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU=

View File

@@ -3,9 +3,9 @@ package serverlogger
import (
"github.com/go-logr/logr"
"github.com/go-logr/zapr"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/protocol/logger"
"github.com/pion/ion-sfu/pkg/buffer"
"github.com/pion/ion-sfu/pkg/sfu"
"github.com/pion/logging"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"

View File

@@ -4,8 +4,8 @@ import (
"errors"
"net"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/pion/ice/v2"
"github.com/pion/ion-sfu/pkg/buffer"
"github.com/pion/webrtc/v3"
"github.com/livekit/livekit-server/pkg/config"

View File

@@ -6,12 +6,12 @@ import (
"sync/atomic"
"time"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/twcc"
"github.com/livekit/protocol/logger"
livekit "github.com/livekit/protocol/proto"
"github.com/livekit/protocol/utils"
"github.com/pion/ion-sfu/pkg/buffer"
"github.com/pion/ion-sfu/pkg/sfu"
"github.com/pion/ion-sfu/pkg/twcc"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
"github.com/pion/webrtc/v3/pkg/rtcerr"

View File

@@ -9,11 +9,11 @@ import (
"time"
lru "github.com/hashicorp/golang-lru"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu/twcc"
"github.com/livekit/protocol/logger"
livekit "github.com/livekit/protocol/proto"
"github.com/livekit/protocol/utils"
"github.com/pion/ion-sfu/pkg/sfu"
"github.com/pion/ion-sfu/pkg/twcc"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
"github.com/pkg/errors"

View File

@@ -14,7 +14,7 @@ type pliThrottle struct {
lastSent map[uint32]int64
}
// github.com/pion/ion-sfu/pkg/sfu/simulcast.go
// github.com/livekit/livekit-server/pkg/sfu/simulcast.go
const (
fullResolution = "f"
halfResolution = "h"

View File

@@ -8,9 +8,9 @@ import (
"time"
"github.com/go-logr/logr"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/protocol/logger"
livekit "github.com/livekit/protocol/proto"
"github.com/pion/ion-sfu/pkg/buffer"
"google.golang.org/protobuf/proto"
"github.com/livekit/livekit-server/pkg/config"

View File

@@ -4,9 +4,9 @@ import (
"time"
"github.com/bep/debounce"
"github.com/livekit/livekit-server/pkg/sfu"
livekit "github.com/livekit/protocol/proto"
"github.com/livekit/protocol/utils"
"github.com/pion/ion-sfu/pkg/sfu"
"github.com/pion/webrtc/v3"
)

View File

@@ -3,7 +3,7 @@ package types
import (
"time"
"github.com/pion/ion-sfu/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"

View File

@@ -5,8 +5,8 @@ import (
"sync"
"github.com/livekit/livekit-server/pkg/rtc/types"
"github.com/livekit/livekit-server/pkg/sfu"
livekit "github.com/livekit/protocol/proto"
"github.com/pion/ion-sfu/pkg/sfu"
)
type FakeSubscribedTrack struct {

View File

@@ -1,7 +1,7 @@
package rtc
import (
"github.com/pion/ion-sfu/pkg/sfu"
"github.com/livekit/livekit-server/pkg/sfu"
)
// wrapper around WebRTC receiver, overriding its ID

74
pkg/sfu/atomic.go Normal file
View File

@@ -0,0 +1,74 @@
package sfu
import "sync/atomic"
// atomicBool is a bool stored in an int32 cell for use with sync/atomic.
type atomicBool int32

// set stores v and reports whether the stored value actually changed.
func (a *atomicBool) set(v bool) (swapped bool) {
	var next int32
	if v {
		next = 1
	}
	prev := atomic.SwapInt32((*int32)(a), next)
	return prev != next
}

// get reports the current value.
func (a *atomicBool) get() bool {
	return atomic.LoadInt32((*int32)(a)) != 0
}

// atomicUint8 is a uint8 stored in a uint32 cell for use with sync/atomic.
type atomicUint8 uint32

// set atomically stores v.
func (a *atomicUint8) set(v uint8) {
	atomic.StoreUint32((*uint32)(a), uint32(v))
}

// get atomically loads the value, truncated to uint8.
func (a *atomicUint8) get() uint8 {
	return uint8(atomic.LoadUint32((*uint32)(a)))
}

// atomicUint16 is a uint16 stored in a uint32 cell for use with sync/atomic.
type atomicUint16 uint32

// set atomically stores v.
func (a *atomicUint16) set(v uint16) {
	atomic.StoreUint32((*uint32)(a), uint32(v))
}

// get atomically loads the value, truncated to uint16.
func (a *atomicUint16) get() uint16 {
	return uint16(atomic.LoadUint32((*uint32)(a)))
}

// add atomically adds v; the wider uint32 cell accumulates and get truncates.
func (a *atomicUint16) add(v uint16) {
	atomic.AddUint32((*uint32)(a), uint32(v))
}

// atomicInt32 is an int32 updated with sync/atomic.
type atomicInt32 int32

// set atomically stores v.
func (a *atomicInt32) set(v int32) {
	atomic.StoreInt32((*int32)(a), v)
}

// get atomically loads the value.
func (a *atomicInt32) get() int32 {
	return atomic.LoadInt32((*int32)(a))
}

// atomicUint32 is a uint32 updated with sync/atomic.
type atomicUint32 uint32

// set atomically stores v.
func (a *atomicUint32) set(v uint32) {
	atomic.StoreUint32((*uint32)(a), v)
}

// add atomically adds v.
func (a *atomicUint32) add(v uint32) {
	atomic.AddUint32((*uint32)(a), v)
}

// get atomically loads the value.
func (a *atomicUint32) get() uint32 {
	return atomic.LoadUint32((*uint32)(a))
}

// atomicInt64 is an int64 updated with sync/atomic.
type atomicInt64 int64

// set atomically stores v.
func (a *atomicInt64) set(v int64) {
	atomic.StoreInt64((*int64)(a), v)
}

// get atomically loads the value.
func (a *atomicInt64) get() int64 {
	return atomic.LoadInt64((*int64)(a))
}

112
pkg/sfu/audioobserver.go Normal file
View File

@@ -0,0 +1,112 @@
package sfu
import (
"sort"
"sync"
)
// audioStream accumulates audio-level observations for one stream between
// consecutive Calc invocations.
type audioStream struct {
	id    string // stream ID
	sum   int    // sum of observed dBov values at or below the threshold
	total int    // number of observations at or below the threshold
}

// AudioObserver tracks per-stream audio levels and periodically reports
// which streams are actively speaking.
type AudioObserver struct {
	sync.RWMutex
	streams   []*audioStream
	expected  int      // minimum observation count for a stream to be reported
	threshold uint8    // loudest accepted dBov value (lower dBov means louder)
	previous  []string // result of the previous Calc, used to suppress duplicate reports
}

// NewAudioObserver creates an observer. threshold is clamped to the valid
// dBov range [0,127]; filter (a percentage) is clamped to [0,100]. expected
// is the minimum number of qualifying observations per interval:
// interval * filter / 2000.
func NewAudioObserver(threshold uint8, interval, filter int) *AudioObserver {
	if threshold > 127 {
		threshold = 127
	}
	if filter < 0 {
		filter = 0
	}
	if filter > 100 {
		filter = 100
	}
	return &AudioObserver{
		threshold: threshold,
		expected:  interval * filter / 2000,
	}
}

// addStream registers a new stream with zeroed counters.
func (a *AudioObserver) addStream(streamID string) {
	a.Lock()
	a.streams = append(a.streams, &audioStream{id: streamID})
	a.Unlock()
}

// removeStream unregisters a stream by swapping it with the last entry
// (order of remaining entries is not preserved; Calc sorts anyway).
func (a *AudioObserver) removeStream(streamID string) {
	a.Lock()
	defer a.Unlock()
	idx := -1
	for i, s := range a.streams {
		if s.id == streamID {
			idx = i
			break
		}
	}
	if idx == -1 {
		return
	}
	a.streams[idx] = a.streams[len(a.streams)-1]
	a.streams[len(a.streams)-1] = nil
	a.streams = a.streams[:len(a.streams)-1]
}

// observe records a dBov measurement for streamID. Values above the
// threshold (quieter than allowed) are ignored.
//
// Fixed: this previously took only the read lock while mutating
// as.sum/as.total, which is a data race when multiple goroutines observe
// concurrently; it now takes the write lock.
func (a *AudioObserver) observe(streamID string, dBov uint8) {
	a.Lock()
	defer a.Unlock()
	for _, as := range a.streams {
		if as.id == streamID {
			if dBov <= a.threshold {
				as.sum += int(dBov)
				as.total++
			}
			return
		}
	}
}

// Calc returns the IDs of streams with at least `expected` qualifying
// observations, ordered by activity (more observations first, then louder,
// i.e. lower dBov sum). Counters are reset for the next interval. It returns
// nil when the result is identical to the previous call.
func (a *AudioObserver) Calc() []string {
	a.Lock()
	defer a.Unlock()
	sort.Slice(a.streams, func(i, j int) bool {
		si, sj := a.streams[i], a.streams[j]
		switch {
		case si.total != sj.total:
			return si.total > sj.total
		default:
			return si.sum < sj.sum
		}
	})
	streamIDs := make([]string, 0, len(a.streams))
	for _, s := range a.streams {
		if s.total >= a.expected {
			streamIDs = append(streamIDs, s.id)
		}
		s.total = 0
		s.sum = 0
	}
	if len(a.previous) == len(streamIDs) {
		for i, s := range a.previous {
			if s != streamIDs[i] {
				a.previous = streamIDs
				return streamIDs
			}
		}
		return nil
	}
	a.previous = streamIDs
	return streamIDs
}

View File

@@ -0,0 +1,271 @@
package sfu
import (
"reflect"
"testing"
"github.com/stretchr/testify/assert"
)
// Test_audioLevel_addStream verifies that addStream registers a stream entry
// with zeroed counters.
func Test_audioLevel_addStream(t *testing.T) {
	type args struct {
		streamID string
	}
	tests := []struct {
		name string
		args args
		want audioStream
	}{
		{
			name: "Must add stream to audio level monitor",
			args: args{
				streamID: "a",
			},
			want: audioStream{
				id: "a",
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			a := &AudioObserver{}
			a.addStream(tt.args.streamID)
			assert.Equal(t, tt.want, *a.streams[0])
		})
	}
}

// Test_audioLevel_calc verifies Calc's filtering by observation count and its
// duplicate-result suppression (nil when the result equals the previous one).
func Test_audioLevel_calc(t *testing.T) {
	type fields struct {
		streams  []*audioStream
		expected int
		previous []string
	}
	tests := []struct {
		name   string
		fields fields
		want   []string
	}{
		{
			name: "Must return streams that are above filter",
			fields: fields{
				streams: []*audioStream{
					{
						id:    "a",
						sum:   1,
						total: 5,
					},
					{
						id:    "b",
						sum:   2,
						total: 5,
					},
					{
						id:    "c",
						sum:   2,
						total: 2,
					},
				},
				expected: 3,
			},
			want: []string{"a", "b"},
		},
		{
			name: "Must return nil if result is same as previous",
			fields: fields{
				streams: []*audioStream{
					{
						id:    "a",
						sum:   1,
						total: 5,
					},
					{
						id:    "b",
						sum:   2,
						total: 5,
					},
					{
						id:    "c",
						sum:   2,
						total: 2,
					},
				},
				expected: 3,
				previous: []string{"a", "b"},
			},
			want: nil,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			a := &AudioObserver{
				streams:  tt.fields.streams,
				expected: tt.fields.expected,
				previous: tt.fields.previous,
			}
			if got := a.Calc(); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("Calc() = %v, want %v", got, tt.want)
			}
		})
	}
}

// Test_audioLevel_observe verifies threshold gating: lower dBov values are
// louder and must be counted; values above the threshold are ignored.
func Test_audioLevel_observe(t *testing.T) {
	type fields struct {
		streams   []*audioStream
		threshold uint8
	}
	type args struct {
		streamID string
		dBov     uint8
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   audioStream
	}{
		{
			name: "Must increase sum and total when dBov is above threshold",
			fields: fields{
				streams: []*audioStream{
					{
						id:    "a",
						sum:   0,
						total: 0,
					},
				},
				threshold: 40,
			},
			args: args{
				streamID: "a",
				dBov:     20,
			},
			want: audioStream{
				id:    "a",
				sum:   20,
				total: 1,
			},
		},
		{
			name: "Must not increase sum and total when dBov is below threshold",
			fields: fields{
				streams: []*audioStream{
					{
						id:    "a",
						sum:   0,
						total: 0,
					},
				},
				threshold: 40,
			},
			args: args{
				streamID: "a",
				dBov:     60,
			},
			want: audioStream{
				id:    "a",
				sum:   0,
				total: 0,
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			a := &AudioObserver{
				streams:   tt.fields.streams,
				threshold: tt.fields.threshold,
			}
			a.observe(tt.args.streamID, tt.args.dBov)
			assert.Equal(t, *a.streams[0], tt.want)
		})
	}
}

// Test_audioLevel_removeStream verifies removal by ID; remaining order is not
// asserted because removeStream swaps the removed entry with the last one.
func Test_audioLevel_removeStream(t *testing.T) {
	type fields struct {
		streams []*audioStream
	}
	type args struct {
		streamID string
	}
	tests := []struct {
		name   string
		fields fields
		args   args
	}{
		{
			name: "Must remove correct ID",
			fields: fields{
				streams: []*audioStream{
					{
						id: "a",
					},
					{
						id: "b",
					},
					{
						id: "c",
					},
					{
						id: "d",
					},
				},
			},
			args: args{
				streamID: "b",
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			a := &AudioObserver{
				streams: tt.fields.streams,
			}
			a.removeStream(tt.args.streamID)
			assert.Equal(t, len(a.streams), len(tt.fields.streams)-1)
			for _, s := range a.streams {
				assert.NotEqual(t, s.id, tt.args.streamID)
			}
		})
	}
}

// Test_newAudioLevel verifies constructor output, including the derived
// expected observation count (interval * filter / 2000).
func Test_newAudioLevel(t *testing.T) {
	type args struct {
		threshold uint8
		interval  int
		filter    int
	}
	tests := []struct {
		name string
		args args
		want *AudioObserver
	}{
		{
			name: "Must return a new audio level",
			args: args{
				threshold: 40,
				interval:  1000,
				filter:    20,
			},
			want: &AudioObserver{
				expected:  1000 * 20 / 2000,
				threshold: 40,
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			if got := NewAudioObserver(tt.args.threshold, tt.args.interval, tt.args.filter); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("NewAudioLevel() = %v, want %v", got, tt.want)
			}
		})
	}
}

114
pkg/sfu/buffer/bucket.go Normal file
View File

@@ -0,0 +1,114 @@
package buffer
import (
"encoding/binary"
"math"
)
// maxPktSize is the size of one bucket slot: a 2-byte big-endian length
// prefix followed by the raw packet, so the largest storable packet is
// maxPktSize-2 bytes.
const maxPktSize = 1500

// Bucket is a fixed-size ring of packet slots indexed relative to the
// highest sequence number seen (headSN). Lookups are validated against the
// sequence number embedded in the stored RTP header (slot offset 4-6).
type Bucket struct {
	buf []byte  // backing storage, partitioned into maxPktSize slots
	src *[]byte // original pooled pointer, kept so it can be returned to the pool
	init     bool   // set once the first packet has fixed headSN
	step     int    // slot index where the next head packet will be written
	headSN   uint16 // highest sequence number written so far
	maxSteps int    // highest usable slot index (slot count - 1)
}

// NewBucket wraps a pooled byte slice, retaining the pointer for later
// release back to the pool.
func NewBucket(buf *[]byte) *Bucket {
	return &Bucket{
		src:      buf,
		buf:      *buf,
		maxSteps: int(math.Floor(float64(len(*buf))/float64(maxPktSize))) - 1,
	}
}

// AddPacket stores pkt under sequence number sn and returns the stored copy.
// latest marks the packet as the new head of the stream; non-head (late)
// packets are placed into their original slot if still within the ring.
func (b *Bucket) AddPacket(pkt []byte, sn uint16, latest bool) ([]byte, error) {
	if !b.init {
		// First packet: pretend the previous head was sn-1.
		b.headSN = sn - 1
		b.init = true
	}
	if !latest {
		return b.set(sn, pkt)
	}
	diff := sn - b.headSN
	b.headSN = sn
	// Advance one slot per skipped sequence number so slot positions stay
	// aligned with sequence numbers.
	// NOTE(review): this wraps at `>= b.maxSteps` while push wraps at
	// `> b.maxSteps`, so the two disagree on whether the last slot is used —
	// looks like an off-by-one; confirm against upstream before changing.
	for i := uint16(1); i < diff; i++ {
		b.step++
		if b.step >= b.maxSteps {
			b.step = 0
		}
	}
	return b.push(pkt), nil
}

// GetPacket copies the packet stored under sn into buf, returning its length.
// Fails with errPacketNotFound if the slot no longer holds sn, or
// errBufferTooSmall if buf cannot hold the packet.
func (b *Bucket) GetPacket(buf []byte, sn uint16) (i int, err error) {
	p := b.get(sn)
	if p == nil {
		err = errPacketNotFound
		return
	}
	i = len(p)
	if cap(buf) < i {
		err = errBufferTooSmall
		return
	}
	if len(buf) < i {
		buf = buf[:i]
	}
	copy(buf, p)
	return
}

// push writes pkt into the current head slot (length prefix + payload) and
// advances the head slot index, wrapping around the ring.
func (b *Bucket) push(pkt []byte) []byte {
	binary.BigEndian.PutUint16(b.buf[b.step*maxPktSize:], uint16(len(pkt)))
	off := b.step*maxPktSize + 2
	copy(b.buf[off:], pkt)
	b.step++
	if b.step > b.maxSteps {
		b.step = 0
	}
	return b.buf[off : off+len(pkt)]
}

// get locates the slot for sn by its distance from the head and validates it
// via the sequence number stored in the packet's RTP header.
func (b *Bucket) get(sn uint16) []byte {
	pos := b.step - int(b.headSN-sn+1)
	if pos < 0 {
		// Older than the current head: wrap backwards, unless the packet
		// is older than the whole ring.
		if pos*-1 > b.maxSteps+1 {
			return nil
		}
		pos = b.maxSteps + pos + 1
	}
	off := pos * maxPktSize
	if off > len(b.buf) {
		return nil
	}
	if binary.BigEndian.Uint16(b.buf[off+4:off+6]) != sn {
		return nil
	}
	sz := int(binary.BigEndian.Uint16(b.buf[off : off+2]))
	return b.buf[off+2 : off+2+sz]
}

// set writes a late (non-head) packet into the slot derived from its
// distance to the head, refusing packets older than the ring capacity and
// retransmissions of packets already stored.
func (b *Bucket) set(sn uint16, pkt []byte) ([]byte, error) {
	if b.headSN-sn >= uint16(b.maxSteps+1) {
		return nil, errPacketTooOld
	}
	pos := b.step - int(b.headSN-sn+1)
	if pos < 0 {
		pos = b.maxSteps + pos + 1
	}
	off := pos * maxPktSize
	if off > len(b.buf) || off < 0 {
		return nil, errPacketTooOld
	}
	// Do not overwrite if packet exist
	if binary.BigEndian.Uint16(b.buf[off+4:off+6]) == sn {
		return nil, errRTXPacket
	}
	binary.BigEndian.PutUint16(b.buf[off:], uint16(len(pkt)))
	copy(b.buf[off+2:], pkt)
	return b.buf[off+2 : off+2+len(pkt)], nil
}

View File

@@ -0,0 +1,139 @@
package buffer
import (
"testing"
"github.com/pion/rtp"
"github.com/stretchr/testify/assert"
)
// TestPackets is a sparse ascending sequence (gaps at 2, 5, 8-9) used to
// exercise both head-advance and late-arrival paths of the bucket.
var TestPackets = []*rtp.Packet{
	{
		Header: rtp.Header{
			SequenceNumber: 1,
		},
	},
	{
		Header: rtp.Header{
			SequenceNumber: 3,
		},
	},
	{
		Header: rtp.Header{
			SequenceNumber: 4,
		},
	},
	{
		Header: rtp.Header{
			SequenceNumber: 6,
		},
	},
	{
		Header: rtp.Header{
			SequenceNumber: 7,
		},
	},
	{
		Header: rtp.Header{
			SequenceNumber: 10,
		},
	},
}

// Test_queue pushes the sparse sequence as head packets, retrieves a stored
// packet, fills gap 8 as a late arrival, and checks that a retransmission of
// the same packet is rejected with errRTXPacket.
func Test_queue(t *testing.T) {
	b := make([]byte, 25000)
	q := NewBucket(&b)
	for _, p := range TestPackets {
		p := p
		buf, err := p.Marshal()
		assert.NoError(t, err)
		assert.NotPanics(t, func() {
			q.AddPacket(buf, p.SequenceNumber, true)
		})
	}
	var expectedSN uint16
	expectedSN = 6
	np := rtp.Packet{}
	buff := make([]byte, maxPktSize)
	i, err := q.GetPacket(buff, 6)
	assert.NoError(t, err)
	err = np.Unmarshal(buff[:i])
	assert.NoError(t, err)
	assert.Equal(t, expectedSN, np.SequenceNumber)
	np2 := &rtp.Packet{
		Header: rtp.Header{
			SequenceNumber: 8,
		},
	}
	buf, err := np2.Marshal()
	assert.NoError(t, err)
	expectedSN = 8
	q.AddPacket(buf, 8, false)
	i, err = q.GetPacket(buff, expectedSN)
	assert.NoError(t, err)
	err = np.Unmarshal(buff[:i])
	assert.NoError(t, err)
	assert.Equal(t, expectedSN, np.SequenceNumber)
	_, err = q.AddPacket(buf, 8, false)
	assert.ErrorIs(t, err, errRTXPacket)
}

// Test_queue_edges exercises sequence numbers around the uint16 wrap
// (65533..2), including a late arrival at 65535.
func Test_queue_edges(t *testing.T) {
	var TestPackets = []*rtp.Packet{
		{
			Header: rtp.Header{
				SequenceNumber: 65533,
			},
		},
		{
			Header: rtp.Header{
				SequenceNumber: 65534,
			},
		},
		{
			Header: rtp.Header{
				SequenceNumber: 2,
			},
		},
	}
	b := make([]byte, 25000)
	q := NewBucket(&b)
	for _, p := range TestPackets {
		p := p
		assert.NotNil(t, p)
		assert.NotPanics(t, func() {
			p := p
			buf, err := p.Marshal()
			assert.NoError(t, err)
			assert.NotPanics(t, func() {
				q.AddPacket(buf, p.SequenceNumber, true)
			})
		})
	}
	var expectedSN uint16
	expectedSN = 65534
	np := rtp.Packet{}
	buff := make([]byte, maxPktSize)
	i, err := q.GetPacket(buff, expectedSN)
	assert.NoError(t, err)
	err = np.Unmarshal(buff[:i])
	assert.NoError(t, err)
	assert.Equal(t, expectedSN, np.SequenceNumber)
	np2 := rtp.Packet{
		Header: rtp.Header{
			SequenceNumber: 65535,
		},
	}
	buf, err := np2.Marshal()
	assert.NoError(t, err)
	q.AddPacket(buf, np2.SequenceNumber, false)
	i, err = q.GetPacket(buff, expectedSN+1)
	assert.NoError(t, err)
	err = np.Unmarshal(buff[:i])
	assert.NoError(t, err)
	assert.Equal(t, expectedSN+1, np.SequenceNumber)
}

715
pkg/sfu/buffer/buffer.go Normal file
View File

@@ -0,0 +1,715 @@
package buffer
import (
"encoding/binary"
"io"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/gammazero/deque"
"github.com/go-logr/logr"
"github.com/pion/rtcp"
"github.com/pion/rtp"
"github.com/pion/sdp/v3"
"github.com/pion/webrtc/v3"
)
const (
	// MaxSN is the number of distinct RTP sequence numbers (2^16).
	MaxSN = 1 << 16
	// reportDelta is the RTCP report interval in nanoseconds (1 second).
	reportDelta = 1e9
)

// Logger is an implementation of logr.Logger. If it is not provided, logging
// is turned off (logr.Discard).
var Logger logr.Logger = logr.Discard()

// pendingPackets holds a raw RTP packet that arrived before Bind, to be
// replayed through calc once codec parameters are known.
type pendingPackets struct {
	arrivalTime int64  // arrival wall clock, nanoseconds since epoch
	packet      []byte // copy of the raw RTP packet
}

// ExtPacket is a parsed RTP packet enriched with buffer-side metadata.
type ExtPacket struct {
	Head     bool        // true if this packet advanced the highest seen sequence number
	Cycle    uint32      // sequence-number wrap count at arrival
	Arrival  int64       // arrival wall clock, nanoseconds since epoch
	Packet   rtp.Packet  // the parsed RTP packet
	Payload  interface{} // codec-specific parsed payload (VP8 only); nil otherwise
	KeyFrame bool        // true for keyframe packets (set for VP8 and H264)
}
// Buffer contains all packets
type Buffer struct {
	sync.Mutex
	bucket     *Bucket          // ring storage for raw packets
	nacker     *nackQueue       // pending NACK state (video with NACK feedback only)
	videoPool  *sync.Pool       // pool backing video buckets
	audioPool  *sync.Pool       // pool backing audio buckets
	codecType  webrtc.RTPCodecType
	extPackets deque.Deque      // parsed *ExtPacket queue consumed by ReadExtended
	pPackets   []pendingPackets // packets received before Bind
	closeOnce  sync.Once
	mediaSSRC  uint32 // SSRC of the stream this buffer tracks
	clockRate  uint32 // RTP clock rate of the negotiated codec
	maxBitrate uint64 // ceiling for REMB estimates
	lastReport int64  // time of the last RTCP report, nanos
	twccExt    uint8  // header-extension ID for transport-wide CC
	audioExt   uint8  // header-extension ID for audio level
	bound      bool   // set once Bind has run
	closed     atomicBool
	mime       string // lowercase codec MIME type

	// supported feedbacks
	remb       bool
	nack       bool
	twcc       bool
	audioLevel bool

	minPacketProbe     int          // counts the first 25 packets used to settle baseSN and max temporal layer
	lastPacketRead     int          // cursor into pPackets for Read
	maxTemporalLayer   int32        // highest VP8 temporal layer seen (accessed atomically)
	bitrate            atomic.Value // []uint64 per-temporal-layer bitrate, refreshed each report interval
	bitrateHelper      [4]uint64    // per-layer byte counters accumulated between reports
	lastSRNTPTime      uint64
	lastSRRTPTime      uint32
	lastSRRecv         int64 // Represents wall clock of the most recent sender report arrival
	baseSN             uint16 // base sequence number used for expected-packet math
	lastRtcpPacketTime int64 // Time the last RTCP packet was received.
	lastRtcpSrTime     int64 // Time the last RTCP SR was received. Required for DLSR computation.
	lastTransit        uint32 // previous packet's transit value, for jitter calculation
	seqHdlr            SeqWrapHandler // sequence-number unwrap / cycle tracking
	stats              Stats
	latestTimestamp          uint32 // latest received RTP timestamp on packet
	latestTimestampTime      int64  // Time of the latest timestamp (in nanos since unix epoch)
	lastFractionLostToReport uint8  // Last fractionlost from subscribers, should report to publisher; Audio only

	// callbacks
	onClose      func()
	onAudioLevel func(level uint8)
	feedbackCB   func([]rtcp.Packet)
	feedbackTWCC func(sn uint16, timeNS int64, marker bool)

	// logger
	logger logr.Logger
}

// Stats aggregates raw reception statistics for the stream.
type Stats struct {
	LastExpected uint32  // expected packet count at the previous report
	LastReceived uint32  // received packet count at the previous report
	LostRate     float32 // loss rate over the last report interval
	PacketCount  uint32  // Number of packets received from this source.
	Jitter       float64 // An estimate of the statistical variance of the RTP data packet inter-arrival time.
	TotalByte    uint64  // total bytes received since the last REMB reset
}

// BufferOptions provides configuration options for the buffer
type Options struct {
	MaxBitRate uint64 // upper bound for REMB estimates
}
// NewBuffer constructs a new Buffer for the given media SSRC, drawing packet
// storage from the supplied video and audio byte pools.
func NewBuffer(ssrc uint32, vp, ap *sync.Pool, logger logr.Logger) *Buffer {
	buf := &Buffer{
		logger:    logger,
		mediaSSRC: ssrc,
		videoPool: vp,
		audioPool: ap,
	}
	// Seed the published bitrate slice and pre-size the packet queue.
	buf.bitrate.Store(make([]uint64, len(buf.bitrateHelper)))
	buf.extPackets.SetMinCapacity(7)
	return buf
}
// Bind attaches codec parameters and feedback configuration to the buffer,
// allocates the packet bucket from the matching pool, and replays any
// packets that arrived before binding. Packets become readable afterwards.
func (b *Buffer) Bind(params webrtc.RTPParameters, o Options) {
	b.Lock()
	defer b.Unlock()
	codec := params.Codecs[0]
	b.clockRate = codec.ClockRate
	b.maxBitrate = o.MaxBitRate
	b.mime = strings.ToLower(codec.MimeType)
	switch {
	case strings.HasPrefix(b.mime, "audio/"):
		b.codecType = webrtc.RTPCodecTypeAudio
		b.bucket = NewBucket(b.audioPool.Get().(*[]byte))
	case strings.HasPrefix(b.mime, "video/"):
		b.codecType = webrtc.RTPCodecTypeVideo
		b.bucket = NewBucket(b.videoPool.Get().(*[]byte))
	default:
		b.codecType = webrtc.RTPCodecType(0)
	}
	// Locate the transport-wide CC header-extension ID, if negotiated.
	for _, ext := range params.HeaderExtensions {
		if ext.URI == sdp.TransportCCURI {
			b.twccExt = uint8(ext.ID)
			break
		}
	}
	if b.codecType == webrtc.RTPCodecTypeVideo {
		// Enable only the RTCP feedback mechanisms the codec negotiated.
		for _, fb := range codec.RTCPFeedback {
			switch fb.Type {
			case webrtc.TypeRTCPFBGoogREMB:
				b.logger.V(1).Info("Setting feedback", "type", "webrtc.TypeRTCPFBGoogREMB")
				b.remb = true
			case webrtc.TypeRTCPFBTransportCC:
				b.logger.V(1).Info("Setting feedback", "type", webrtc.TypeRTCPFBTransportCC)
				b.twcc = true
			case webrtc.TypeRTCPFBNACK:
				b.logger.V(1).Info("Setting feedback", "type", webrtc.TypeRTCPFBNACK)
				b.nacker = newNACKQueue()
				b.nack = true
			}
		}
	} else if b.codecType == webrtc.RTPCodecTypeAudio {
		// Audio: pick up the audio-level header extension when present.
		for _, h := range params.HeaderExtensions {
			if h.URI == sdp.AudioLevelURI {
				b.audioLevel = true
				b.audioExt = uint8(h.ID)
			}
		}
	}
	// Replay packets that were written before Bind.
	for _, pp := range b.pPackets {
		b.calc(pp.packet, pp.arrivalTime)
	}
	b.pPackets = nil
	b.bound = true
	b.logger.V(1).Info("NewBuffer", "MaxBitRate", o.MaxBitRate)
}
// Write ingests one raw RTP packet. Before Bind, the packet is copied and
// queued for later replay; after Bind it is processed immediately. Returns
// io.EOF once the buffer is closed. (The byte count is always 0.)
func (b *Buffer) Write(pkt []byte) (n int, err error) {
	b.Lock()
	defer b.Unlock()
	if b.closed.get() {
		return 0, io.EOF
	}
	if b.bound {
		b.calc(pkt, time.Now().UnixNano())
		return 0, nil
	}
	// Not bound yet: keep a private copy, since the caller may reuse pkt.
	cp := append([]byte(nil), pkt...)
	b.pPackets = append(b.pPackets, pendingPackets{
		packet:      cp,
		arrivalTime: time.Now().UnixNano(),
	})
	return 0, nil
}
// Read copies the next pre-bind pending packet into buff, polling every 25ms
// until one is available. It returns io.EOF once the buffer is closed, or
// errBufferTooSmall if buff cannot hold the packet.
func (b *Buffer) Read(buff []byte) (n int, err error) {
	for !b.closed.get() {
		b.Lock()
		if len(b.pPackets) > b.lastPacketRead {
			pkt := b.pPackets[b.lastPacketRead].packet
			if len(buff) < len(pkt) {
				b.Unlock()
				return 0, errBufferTooSmall
			}
			n = copy(buff, pkt)
			b.lastPacketRead++
			b.Unlock()
			return n, nil
		}
		b.Unlock()
		time.Sleep(25 * time.Millisecond)
	}
	return 0, io.EOF
}
// ReadExtended blocks (polling every 10ms) until a parsed ExtPacket is
// available, returning io.EOF once the buffer is closed.
func (b *Buffer) ReadExtended() (*ExtPacket, error) {
	for !b.closed.get() {
		b.Lock()
		if b.extPackets.Len() == 0 {
			b.Unlock()
			time.Sleep(10 * time.Millisecond)
			continue
		}
		ep := b.extPackets.PopFront().(*ExtPacket)
		b.Unlock()
		return ep, nil
	}
	return nil, io.EOF
}
// Close marks the buffer closed, returns its bucket storage to the owning
// pool, and fires the OnClose callback. It is safe to call multiple times;
// only the first call has an effect. Always returns nil.
func (b *Buffer) Close() error {
	b.Lock()
	defer b.Unlock()
	b.closeOnce.Do(func() {
		if b.bucket != nil {
			switch b.codecType {
			case webrtc.RTPCodecTypeVideo:
				b.videoPool.Put(b.bucket.src)
			case webrtc.RTPCodecTypeAudio:
				b.audioPool.Put(b.bucket.src)
			}
		}
		b.closed.set(true)
		// Guard against Close before OnClose was registered; the original
		// called b.onClose unconditionally and would panic on a nil callback.
		if b.onClose != nil {
			b.onClose()
		}
	})
	return nil
}
// OnClose registers fn to be invoked once when the buffer is closed.
func (b *Buffer) OnClose(fn func()) {
	b.onClose = fn
}
// calc ingests one raw RTP packet: it updates sequence tracking and NACK
// state, stores the packet in the bucket, parses it, queues an ExtPacket for
// readers, and maintains jitter/bitrate statistics, emitting RTCP feedback
// once per reportDelta interval. Caller must hold b.Mutex.
func (b *Buffer) calc(pkt []byte, arrivalTime int64) {
	// RTP sequence number lives at bytes 2-4 of the fixed header.
	sn := binary.BigEndian.Uint16(pkt[2:4])
	var headPkt bool
	if b.stats.PacketCount == 0 {
		// First packet fixes the base sequence number and starts the report timer.
		b.baseSN = sn
		b.lastReport = arrivalTime
		b.seqHdlr.UpdateMaxSeq(uint32(sn))
		headPkt = true
	} else {
		extSN, isNewer := b.seqHdlr.Unwrap(sn)
		if b.nack {
			if isNewer {
				// Every sequence number skipped between the previous head
				// and this one is a loss candidate.
				for i := b.seqHdlr.MaxSeqNo() + 1; i < extSN; i++ {
					b.nacker.push(i)
				}
			} else {
				// A late arrival fills a hole; stop NACKing it.
				b.nacker.remove(extSN)
			}
		}
		if isNewer {
			b.seqHdlr.UpdateMaxSeq(extSN)
		}
		headPkt = isNewer
	}
	var p rtp.Packet
	pb, err := b.bucket.AddPacket(pkt, sn, headPkt)
	if err != nil {
		// Retransmission we already hold, or a packet too old for the ring;
		// dropped silently either way.
		if err == errRTXPacket {
			return
		}
		return
	}
	if err = p.Unmarshal(pb); err != nil {
		return
	}
	// submit to TWCC even if it is a padding only packet. Clients use padding only packets as probes
	// for bandwidth estimation
	if b.twcc {
		if ext := p.GetExtension(b.twccExt); ext != nil && len(ext) > 1 {
			b.feedbackTWCC(binary.BigEndian.Uint16(ext[0:2]), arrivalTime, p.Marker)
		}
	}
	b.stats.TotalByte += uint64(len(pkt))
	b.stats.PacketCount++
	ep := ExtPacket{
		Head:    headPkt,
		Cycle:   b.seqHdlr.Cycles(),
		Packet:  p,
		Arrival: arrivalTime,
	}
	if len(p.Payload) == 0 {
		// padding only packet, nothing else to do
		b.extPackets.PushBack(&ep)
		return
	}
	temporalLayer := int32(0)
	switch b.mime {
	case "video/vp8":
		vp8Packet := VP8{}
		if err := vp8Packet.Unmarshal(p.Payload); err != nil {
			return
		}
		ep.Payload = vp8Packet
		ep.KeyFrame = vp8Packet.IsKeyFrame
		temporalLayer = int32(vp8Packet.TID)
	case "video/h264":
		ep.KeyFrame = isH264Keyframe(p.Payload)
	}
	if b.minPacketProbe < 25 {
		// During the first 25 packets, allow the base sequence number to
		// move backwards and learn the maximum VP8 temporal layer.
		if sn < b.baseSN {
			b.baseSN = sn
		}
		if b.mime == "video/vp8" {
			pld := ep.Payload.(VP8)
			mtl := atomic.LoadInt32(&b.maxTemporalLayer)
			if mtl < int32(pld.TID) {
				atomic.StoreInt32(&b.maxTemporalLayer, int32(pld.TID))
			}
		}
		b.minPacketProbe++
	}
	b.extPackets.PushBack(&ep)
	// if first time update or the timestamp is later (factoring timestamp wrap around)
	latestTimestamp := atomic.LoadUint32(&b.latestTimestamp)
	latestTimestampTimeInNanosSinceEpoch := atomic.LoadInt64(&b.latestTimestampTime)
	if (latestTimestampTimeInNanosSinceEpoch == 0) || IsLaterTimestamp(p.Timestamp, latestTimestamp) {
		atomic.StoreUint32(&b.latestTimestamp, p.Timestamp)
		atomic.StoreInt64(&b.latestTimestampTime, arrivalTime)
	}
	// Interarrival jitter: compare transit-time deltas in clock-rate units,
	// smoothed by 1/16 (RFC 3550 Appendix A.8 style).
	arrival := uint32(arrivalTime / 1e6 * int64(b.clockRate/1e3))
	transit := arrival - p.Timestamp
	if b.lastTransit != 0 {
		d := int32(transit - b.lastTransit)
		if d < 0 {
			d = -d
		}
		b.stats.Jitter += (float64(d) - b.stats.Jitter) / 16
	}
	b.lastTransit = transit
	if b.audioLevel {
		if e := p.GetExtension(b.audioExt); e != nil && b.onAudioLevel != nil {
			ext := rtp.AudioLevelExtension{}
			if err := ext.Unmarshal(e); err == nil {
				b.onAudioLevel(ext.Level)
			}
		}
	}
	if b.nacker != nil {
		if r := b.buildNACKPacket(); r != nil {
			b.feedbackCB(r)
		}
	}
	b.bitrateHelper[temporalLayer] += uint64(len(pkt))
	diff := arrivalTime - b.lastReport
	if diff >= reportDelta {
		// LK-TODO-START
		// As this happens in the data path, if there are no packets received
		// in an interval, the bitrate is stuck with the old value. GetBitrate()
		// method in sfu.Receiver uses the availableLayers set by stream
		// tracker to report 0 bitrate if a layer is not available. But, stream
		// tracker is not run for the lowest layer. So, if the lowest layer stops,
		// stale bitrate will be reported. The simplest thing might be to run the
		// stream tracker on all layers to address this. Another option to look at
		// is some monitoring loop running at low frequency and reporting bitrate.
		// LK-TODO-END
		bitrates, ok := b.bitrate.Load().([]uint64)
		if !ok {
			bitrates = make([]uint64, len(b.bitrateHelper))
		}
		// Convert the per-layer byte counters into bits/sec over the interval.
		for i := 0; i < len(b.bitrateHelper); i++ {
			br := (8 * b.bitrateHelper[i] * uint64(reportDelta)) / uint64(diff)
			bitrates[i] = br
			b.bitrateHelper[i] = 0
		}
		b.bitrate.Store(bitrates)
		b.feedbackCB(b.getRTCP())
		b.lastReport = arrivalTime
	}
}
// buildNACKPacket drains the NACK queue into RTCP feedback: a
// TransportLayerNack for any missing sequence-number pairs, plus a
// PictureLossIndication when the nacker decided a keyframe is needed.
// Returns nil when there is nothing to send.
func (b *Buffer) buildNACKPacket() []rtcp.Packet {
	nacks, askKeyframe := b.nacker.pairs(b.seqHdlr.MaxSeqNo())
	// Simplified from `nacks != nil && len(nacks) > 0` — the nil check was
	// redundant (len of a nil slice is 0).
	if len(nacks) == 0 && !askKeyframe {
		return nil
	}
	var pkts []rtcp.Packet
	if len(nacks) > 0 {
		pkts = append(pkts, &rtcp.TransportLayerNack{
			MediaSSRC: b.mediaSSRC,
			Nacks:     nacks,
		})
	}
	if askKeyframe {
		pkts = append(pkts, &rtcp.PictureLossIndication{
			MediaSSRC: b.mediaSSRC,
		})
	}
	return pkts
}
// buildREMBPacket derives a receiver-estimated maximum bitrate from the
// measured bitrate and the recent loss rate, clamped below maxBitrate and
// above 100 kbps. As a side effect it resets the TotalByte counter.
func (b *Buffer) buildREMBPacket() *rtcp.ReceiverEstimatedMaximumBitrate {
	estimate := b.Bitrate()
	// The two loss bands are mutually exclusive, so a switch is equivalent
	// to the sequential checks.
	switch {
	case b.stats.LostRate < 0.02:
		// Low loss: probe upward by ~9% plus a small constant.
		estimate = uint64(float64(estimate)*1.09) + 2000
	case b.stats.LostRate > .1:
		// Heavy loss: back off proportionally to the loss rate.
		estimate = uint64(float64(estimate) * float64(1-0.5*b.stats.LostRate))
	}
	if estimate > b.maxBitrate {
		estimate = b.maxBitrate
	}
	if estimate < 100000 {
		estimate = 100000
	}
	b.stats.TotalByte = 0
	return &rtcp.ReceiverEstimatedMaximumBitrate{
		Bitrate: float32(estimate),
		SSRCs:   []uint32{b.mediaSSRC},
	}
}
// buildReceptionReport computes the RTCP reception report for this stream:
// cumulative and interval loss, extended highest sequence number, jitter,
// and the LSR/DLSR fields used for RTT computation. Caller must hold b.Mutex.
func (b *Buffer) buildReceptionReport() rtcp.ReceptionReport {
	extMaxSeq := b.seqHdlr.MaxSeqNo()
	expected := extMaxSeq - uint32(b.baseSN) + 1
	lost := uint32(0)
	if b.stats.PacketCount < expected && b.stats.PacketCount != 0 {
		lost = expected - b.stats.PacketCount
	}
	// Interval statistics since the previous report.
	expectedInterval := expected - b.stats.LastExpected
	b.stats.LastExpected = expected
	receivedInterval := b.stats.PacketCount - b.stats.LastReceived
	b.stats.LastReceived = b.stats.PacketCount
	lostInterval := expectedInterval - receivedInterval
	b.stats.LostRate = float32(lostInterval) / float32(expectedInterval)
	// Fraction lost as an 8-bit fixed-point fraction of the interval.
	var fracLost uint8
	if expectedInterval != 0 && lostInterval > 0 {
		fracLost = uint8((lostInterval << 8) / expectedInterval)
	}
	if b.lastFractionLostToReport > fracLost {
		// If fractionlost from subscriber is bigger than sfu received, use it.
		fracLost = b.lastFractionLostToReport
	}
	// DLSR: delay since the last sender report, in 1/65536-second units.
	var dlsr uint32
	if b.lastSRRecv != 0 {
		delayMS := uint32((time.Now().UnixNano() - b.lastSRRecv) / 1e6)
		dlsr = (delayMS / 1e3) << 16
		dlsr |= (delayMS % 1e3) * 65536 / 1000
	}
	rr := rtcp.ReceptionReport{
		SSRC:               b.mediaSSRC,
		FractionLost:       fracLost,
		TotalLost:          lost,
		LastSequenceNumber: extMaxSeq,
		Jitter:             uint32(b.stats.Jitter),
		LastSenderReport:   uint32(b.lastSRNTPTime >> 16),
		Delay:              dlsr,
	}
	return rr
}
// SetSenderReportData records the RTP/NTP timestamp pair from an incoming
// RTCP sender report together with its arrival time.
//
// The fields are stored atomically because GetSenderReportData reads them
// with atomic loads without taking the buffer lock; the original plain
// assignments raced with those lock-free reads.
func (b *Buffer) SetSenderReportData(rtpTime uint32, ntpTime uint64) {
	b.Lock()
	atomic.StoreUint32(&b.lastSRRTPTime, rtpTime)
	atomic.StoreUint64(&b.lastSRNTPTime, ntpTime)
	atomic.StoreInt64(&b.lastSRRecv, time.Now().UnixNano())
	b.Unlock()
}
// SetLastFractionLostReport stores the fraction-lost value reported by
// downstream subscribers so buildReceptionReport can forward the worse of
// the two values to the publisher (audio only).
// NOTE(review): written without the buffer lock while buildReceptionReport
// reads it with the lock held — looks racy; confirm callers serialize access.
func (b *Buffer) SetLastFractionLostReport(lost uint8) {
	b.lastFractionLostToReport = lost
}
// getRTCP assembles the periodic receiver-side RTCP packets: always a
// receiver report, plus a REMB estimate when REMB was negotiated and
// transport-wide CC was not.
func (b *Buffer) getRTCP() []rtcp.Packet {
	pkts := []rtcp.Packet{
		&rtcp.ReceiverReport{
			Reports: []rtcp.ReceptionReport{b.buildReceptionReport()},
		},
	}
	if b.remb && !b.twcc {
		pkts = append(pkts, b.buildREMBPacket())
	}
	return pkts
}
// GetPacket copies the stored packet with sequence number sn into buff and
// returns its length, or io.EOF once the buffer has been closed.
func (b *Buffer) GetPacket(buff []byte, sn uint16) (int, error) {
	b.Lock()
	defer b.Unlock()
	if b.closed.get() {
		return 0, io.EOF
	}
	return b.bucket.GetPacket(buff, sn)
}
// Bitrate returns the current publisher stream bitrate, summed over all
// temporal layers.
func (b *Buffer) Bitrate() uint64 {
	var total uint64
	if rates, ok := b.bitrate.Load().([]uint64); ok {
		for _, r := range rates {
			total += r
		}
	}
	return total
}
// BitrateTemporal returns the current publisher stream bitrate per temporal
// layer, as an independent copy of the internal slice.
func (b *Buffer) BitrateTemporal() []uint64 {
	rates, ok := b.bitrate.Load().([]uint64)
	if !ok {
		return make([]uint64, len(b.bitrateHelper))
	}
	out := make([]uint64, len(rates))
	copy(out, rates)
	return out
}
// BitrateTemporalCumulative returns the current publisher stream bitrate per
// temporal layer, where each non-zero layer is accumulated with every lower
// layer's rate (a layer that reported 0 stays 0).
func (b *Buffer) BitrateTemporalCumulative() []uint64 {
	rates, ok := b.bitrate.Load().([]uint64)
	if !ok {
		return make([]uint64, len(b.bitrateHelper))
	}
	out := make([]uint64, len(rates))
	copy(out, rates)
	// Single ascending pass: `lower` carries the sum of the original
	// per-layer rates below the current index.
	var lower uint64
	for i, r := range out {
		if r != 0 {
			out[i] += lower
		}
		lower += r
	}
	return out
}
func (b *Buffer) MaxTemporalLayer() int32 {
return atomic.LoadInt32(&b.maxTemporalLayer)
}
func (b *Buffer) OnTransportWideCC(fn func(sn uint16, timeNS int64, marker bool)) {
b.feedbackTWCC = fn
}
// OnFeedback registers the callback that receives the RTCP feedback
// packets (receiver reports, NACKs, REMB, PLI) produced by this buffer.
// NOTE(review): plain field write; register before packets flow.
func (b *Buffer) OnFeedback(fn func(fb []rtcp.Packet)) {
	b.feedbackCB = fn
}
// OnAudioLevel registers the callback invoked with the audio level of
// received packets.
// NOTE(review): plain field write; register before packets flow.
func (b *Buffer) OnAudioLevel(fn func(level uint8)) {
	b.onAudioLevel = fn
}
// GetMediaSSRC returns the associated SSRC of the RTP stream.
func (b *Buffer) GetMediaSSRC() uint32 {
	return b.mediaSSRC
}
// GetClockRate returns the RTP clock rate (ticks per second) negotiated
// for this stream's codec.
func (b *Buffer) GetClockRate() uint32 {
	return b.clockRate
}
// GetSenderReportData returns the RTP timestamp and NTP timestamp from
// the last received sender report, plus the local receive time in
// nanoseconds since the Unix epoch. All three fields are read
// atomically (they are stored by SetSenderReportData).
func (b *Buffer) GetSenderReportData() (rtpTime uint32, ntpTime uint64, lastReceivedTimeInNanosSinceEpoch int64) {
	rtpTime = atomic.LoadUint32(&b.lastSRRTPTime)
	ntpTime = atomic.LoadUint64(&b.lastSRNTPTime)
	lastReceivedTimeInNanosSinceEpoch = atomic.LoadInt64(&b.lastSRRecv)
	return rtpTime, ntpTime, lastReceivedTimeInNanosSinceEpoch
}
// GetStats returns a snapshot of the buffer's raw statistics, copied
// under the lock so the caller gets a consistent view.
func (b *Buffer) GetStats() Stats {
	b.Lock()
	defer b.Unlock()
	return b.stats
}
// GetLatestTimestamp returns the latest RTP timestamp (tracked with
// wrap-around factored in) and the local time, in nanoseconds since the
// Unix epoch, at which it was recorded. Both reads are atomic.
func (b *Buffer) GetLatestTimestamp() (latestTimestamp uint32, latestTimestampTimeInNanosSinceEpoch int64) {
	latestTimestamp = atomic.LoadUint32(&b.latestTimestamp)
	latestTimestampTimeInNanosSinceEpoch = atomic.LoadInt64(&b.latestTimestampTime)
	return latestTimestamp, latestTimestampTimeInNanosSinceEpoch
}
// IsTimestampWrapAround reports whether the RTP timestamp moved from
// timestamp1 to timestamp2 by wrapping around the 32-bit boundary.
// Only transitions from the top ~1/16th of the range into the bottom
// ~1/16th count as a wrap.
func IsTimestampWrapAround(timestamp1 uint32, timestamp2 uint32) bool {
	if timestamp2 >= timestamp1 {
		return false
	}
	return timestamp1 > 0xf0000000 && timestamp2 < 0x0fffffff
}

// IsLaterTimestamp reports whether timestamp1 is later in time than
// timestamp2, accounting for 32-bit wrap-around in either direction.
func IsLaterTimestamp(timestamp1 uint32, timestamp2 uint32) bool {
	switch {
	case timestamp1 > timestamp2:
		// numerically larger is later unless it is the pre-wrap value
		return !IsTimestampWrapAround(timestamp1, timestamp2)
	case IsTimestampWrapAround(timestamp2, timestamp1):
		// numerically smaller but already past the wrap point
		return true
	default:
		return false
	}
}
// isNewerUint16 reports whether val1 is ahead of val2 in 16-bit
// sequence-number order (RFC 3550 style half-range comparison).
func isNewerUint16(val1, val2 uint16) bool {
	if val1 == val2 {
		return false
	}
	return val1-val2 < 0x8000
}

// SeqWrapHandler tracks the highest extended (unwrapped) RTP sequence
// number seen, with accumulated 16-bit wrap cycles in the high bits.
type SeqWrapHandler struct {
	maxSeqNo uint32 // extended sequence number: cycle count in the high 16 bits
}

// Cycles returns the accumulated wrap cycles (high 16 bits of the
// extended sequence number).
func (s *SeqWrapHandler) Cycles() uint32 {
	return s.maxSeqNo &^ 0xffff
}

// MaxSeqNo returns the highest extended sequence number seen so far.
func (s *SeqWrapHandler) MaxSeqNo() uint32 {
	return s.maxSeqNo
}

// Unwrap maps the 16-bit seq onto the extended sequence space around
// the current maximum. It returns the extended value and whether seq is
// newer than the current maximum. The maximum itself is not modified;
// call UpdateMaxSeq for that.
func (s *SeqWrapHandler) Unwrap(seq uint16) (uint32, bool) {
	cur := uint16(s.maxSeqNo)
	newer := isNewerUint16(seq, cur)
	offset := int32(seq) - int32(cur)

	switch {
	case newer && offset < 0:
		// newer but numerically smaller: seq crossed the 16-bit
		// boundary going forward
		offset += 0x10000
	case !newer && offset > 0 && (int32(s.maxSeqNo)+offset-0x10000) >= 0:
		// older but numerically larger: seq belongs to the previous
		// cycle. Guarded so the extended value never goes below zero
		// (e.g. maxSeqNo=1 followed by a late seq 65534 stays in cycle
		// zero rather than unwrapping to -2).
		offset -= 0x10000
	}

	return uint32(int32(s.maxSeqNo) + offset), newer
}

// UpdateMaxSeq records extSeq as the new highest extended sequence number.
func (s *SeqWrapHandler) UpdateMaxSeq(extSeq uint32) {
	s.maxSeqNo = extSeq
}

View File

@@ -0,0 +1,363 @@
package buffer
import (
"sync"
"testing"
"time"
"github.com/livekit/livekit-server/pkg/sfu/logger"
"github.com/pion/rtcp"
"github.com/pion/rtp"
"github.com/pion/webrtc/v3"
"github.com/stretchr/testify/assert"
)
// CreateTestPacket builds a minimal RTP packet with a 3-byte payload.
// When pktStamp is nil the header is left at its zero value; otherwise
// the given sequence number and timestamp are applied.
func CreateTestPacket(pktStamp *SequenceNumberAndTimeStamp) *rtp.Packet {
	var hdr rtp.Header
	if pktStamp != nil {
		hdr.SequenceNumber = pktStamp.SequenceNumber
		hdr.Timestamp = pktStamp.Timestamp
	}
	return &rtp.Packet{
		Header:  hdr,
		Payload: []byte{1, 2, 3},
	}
}
// SequenceNumberAndTimeStamp bundles the two RTP header fields used to
// build test packets.
type SequenceNumberAndTimeStamp struct {
	SequenceNumber uint16
	Timestamp      uint32
}
// CreateTestListPackets builds one test packet per entry of snsAndTSs,
// preserving order. Returns nil for an empty input.
func CreateTestListPackets(snsAndTSs []SequenceNumberAndTimeStamp) []*rtp.Packet {
	var packets []*rtp.Packet
	for i := range snsAndTSs {
		// CreateTestPacket only reads the stamp, so pointing into the
		// slice is safe
		packets = append(packets, CreateTestPacket(&snsAndTSs[i]))
	}
	return packets
}
// TestNack verifies that gaps in the incoming sequence-number stream
// trigger transport-layer NACKs (retried up to three times) followed by
// a PLI, both in the normal case and across the 16-bit wrap.
func TestNack(t *testing.T) {
	pool := &sync.Pool{
		New: func() interface{} {
			b := make([]byte, 1500)
			return &b
		},
	}
	logger.SetGlobalOptions(logger.GlobalConfig{V: 1}) // 2 - TRACE
	logger := logger.New()
	t.Run("nack normal", func(t *testing.T) {
		buff := NewBuffer(123, pool, pool, logger)
		buff.codecType = webrtc.RTPCodecTypeVideo
		assert.NotNil(t, buff)
		var wg sync.WaitGroup
		// 3 nacks 1 Pli
		wg.Add(4)
		buff.OnFeedback(func(fb []rtcp.Packet) {
			for _, pkt := range fb {
				switch p := pkt.(type) {
				case *rtcp.TransportLayerNack:
					// the only gap is sn 1, so every NACK must target it
					if p.Nacks[0].PacketList()[0] == 1 && p.MediaSSRC == 123 {
						wg.Done()
					}
				case *rtcp.PictureLossIndication:
					if p.MediaSSRC == 123 {
						wg.Done()
					}
				}
			}
		})
		buff.Bind(webrtc.RTPParameters{
			HeaderExtensions: nil,
			Codecs: []webrtc.RTPCodecParameters{
				{
					RTPCodecCapability: webrtc.RTPCodecCapability{
						MimeType:  "video/vp8",
						ClockRate: 90000,
						RTCPFeedback: []webrtc.RTCPFeedback{{
							Type: "nack",
						}},
					},
					PayloadType: 96,
				},
			},
		}, Options{})
		// write sn 0..14, skipping 1 to create the gap
		for i := 0; i < 15; i++ {
			if i == 1 {
				continue
			}
			pkt := rtp.Packet{
				Header:  rtp.Header{SequenceNumber: uint16(i), Timestamp: uint32(i)},
				Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
			}
			b, err := pkt.Marshal()
			assert.NoError(t, err)
			_, err = buff.Write(b)
			assert.NoError(t, err)
		}
		wg.Wait()
	})
	t.Run("nack with seq wrap", func(t *testing.T) {
		buff := NewBuffer(123, pool, pool, logger)
		buff.codecType = webrtc.RTPCodecTypeVideo
		assert.NotNil(t, buff)
		var wg sync.WaitGroup
		// the gap spans the 16-bit wrap: 65534, 65535, 0, 1
		expects := map[uint16]int{
			65534: 0,
			65535: 0,
			0:     0,
			1:     0,
		}
		wg.Add(3 * len(expects)) // retry 3 times
		buff.OnFeedback(func(fb []rtcp.Packet) {
			for _, pkt := range fb {
				switch p := pkt.(type) {
				case *rtcp.TransportLayerNack:
					if p.MediaSSRC == 123 {
						for _, v := range p.Nacks {
							v.Range(func(seq uint16) bool {
								if _, ok := expects[seq]; ok {
									wg.Done()
								} else {
									assert.Fail(t, "unexpected nack seq ", seq)
								}
								return true
							})
						}
					}
				case *rtcp.PictureLossIndication:
					if p.MediaSSRC == 123 {
						// wg.Done()
					}
				}
			}
		})
		buff.Bind(webrtc.RTPParameters{
			HeaderExtensions: nil,
			Codecs: []webrtc.RTPCodecParameters{
				{
					RTPCodecCapability: webrtc.RTPCodecCapability{
						MimeType:  "video/vp8",
						ClockRate: 90000,
						RTCPFeedback: []webrtc.RTCPFeedback{{
							Type: "nack",
						}},
					},
					PayloadType: 96,
				},
			},
		}, Options{})
		// write sn 65533..65547 (i+65533), skipping i in 1..4 so the
		// four missing numbers straddle the wrap
		for i := 0; i < 15; i++ {
			if i > 0 && i < 5 {
				continue
			}
			pkt := rtp.Packet{
				Header:  rtp.Header{SequenceNumber: uint16(i + 65533), Timestamp: uint32(i)},
				Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
			}
			b, err := pkt.Marshal()
			assert.NoError(t, err)
			_, err = buff.Write(b)
			assert.NoError(t, err)
		}
		wg.Wait()
	})
}
// TestNewBuffer feeds out-of-order packets that straddle the 16-bit
// sequence-number wrap and checks the buffer's wrap handler advances
// into the next cycle.
func TestNewBuffer(t *testing.T) {
	type args struct {
		options Options
		ssrc    uint32
	}
	tests := []struct {
		name string
		args args
	}{
		{
			name: "Must not be nil and add packets in sequence",
			args: args{
				options: Options{
					MaxBitRate: 1e6,
				},
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// sequence numbers around the 65535 -> 0 wrap, delivered
			// out of order
			var TestPackets = []*rtp.Packet{
				{
					Header: rtp.Header{
						SequenceNumber: 65533,
					},
				},
				{
					Header: rtp.Header{
						SequenceNumber: 65534,
					},
				},
				{
					Header: rtp.Header{
						SequenceNumber: 2,
					},
				},
				{
					Header: rtp.Header{
						SequenceNumber: 65535,
					},
				},
			}
			pool := &sync.Pool{
				New: func() interface{} {
					b := make([]byte, 1500)
					return &b
				},
			}
			logger.SetGlobalOptions(logger.GlobalConfig{V: 2}) // 2 - TRACE
			logger := logger.New()
			buff := NewBuffer(123, pool, pool, logger)
			buff.codecType = webrtc.RTPCodecTypeVideo
			assert.NotNil(t, buff)
			assert.NotNil(t, TestPackets)
			buff.OnFeedback(func(_ []rtcp.Packet) {
			})
			buff.Bind(webrtc.RTPParameters{
				HeaderExtensions: nil,
				Codecs: []webrtc.RTPCodecParameters{{
					RTPCodecCapability: webrtc.RTPCodecCapability{
						MimeType:     "video/vp8",
						ClockRate:    9600,
						RTCPFeedback: nil,
					},
					PayloadType: 0,
				}},
			}, Options{})
			for _, p := range TestPackets {
				buf, _ := p.Marshal()
				buff.Write(buf)
			}
			// assert.Equal(t, 6, buff.PacketQueue.size)
			// sn 2 arriving after 65533/65534 puts the handler in cycle 1
			assert.Equal(t, uint32(1<<16), buff.seqHdlr.Cycles())
			assert.Equal(t, uint16(2), uint16(buff.seqHdlr.MaxSeqNo()))
		})
	}
}
// TestFractionLostReport checks that a fraction-lost value injected via
// SetLastFractionLostReport is reported verbatim in the next receiver
// report produced by the buffer.
func TestFractionLostReport(t *testing.T) {
	pool := &sync.Pool{
		New: func() interface{} {
			b := make([]byte, 1500)
			return &b
		},
	}
	logger.SetGlobalOptions(logger.GlobalConfig{V: 1}) // 2 - TRACE
	buff := NewBuffer(123, pool, pool, logger.New())
	buff.codecType = webrtc.RTPCodecTypeVideo
	assert.NotNil(t, buff)
	var wg sync.WaitGroup
	wg.Add(1)
	buff.SetLastFractionLostReport(55)
	buff.OnFeedback(func(fb []rtcp.Packet) {
		for _, pkt := range fb {
			switch p := pkt.(type) {
			case *rtcp.ReceiverReport:
				// the injected value must appear unchanged
				for _, v := range p.Reports {
					assert.EqualValues(t, 55, v.FractionLost)
				}
				wg.Done()
			}
		}
	})
	buff.Bind(webrtc.RTPParameters{
		HeaderExtensions: nil,
		Codecs: []webrtc.RTPCodecParameters{
			{
				RTPCodecCapability: webrtc.RTPCodecCapability{
					MimeType:  "audio/opus",
					ClockRate: 48000,
				},
				PayloadType: 96,
			},
		},
	}, Options{})
	for i := 0; i < 15; i++ {
		pkt := rtp.Packet{
			Header:  rtp.Header{SequenceNumber: uint16(i), Timestamp: uint32(i)},
			Payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1},
		}
		b, err := pkt.Marshal()
		assert.NoError(t, err)
		// pause once so at least one report interval elapses mid-stream
		if i == 1 {
			time.Sleep(1 * time.Second)
		}
		_, err = buff.Write(b)
		assert.NoError(t, err)
	}
	wg.Wait()
}
// TestSeqWrapHandler exercises Unwrap around the 16-bit boundary in
// both directions and checks the newer/older classification.
func TestSeqWrapHandler(t *testing.T) {
	s := SeqWrapHandler{}
	s.UpdateMaxSeq(1)
	assert.Equal(t, uint32(1), s.MaxSeqNo())

	// each case: starting max, incoming 16-bit seq, expected unwrapped
	// value, and whether the seq ranks newer than the max
	cases := map[string]struct {
		max       uint32
		seq       uint32
		unwrapped uint32
		newer     bool
	}{
		"no wrap":                             {1, 4, 4, true},
		"no wrap backward":                    {4, 1, 1, false},
		"wrap around forward to zero":         {65534, 0, 65536, true},
		"wrap around forward":                 {65534, 10, 65546, true},
		"wrap around forward 2":               {65535 + 65536*2, 1, 1 + 65536*3, true},
		"wrap around backward ":               {5, 65534, 65534, false},
		"wrap around backward less than zero": {5, 65534, 65534, false},
	}
	for name, c := range cases {
		c := c
		t.Run(name, func(t *testing.T) {
			h := SeqWrapHandler{}
			h.UpdateMaxSeq(c.max)
			extsn, newer := h.Unwrap(uint16(c.seq))
			assert.Equal(t, c.newer, newer)
			assert.Equal(t, c.unwrapped, extsn)
		})
	}
}
// TestIsTimestampWrap checks IsLaterTimestamp for ordinary ordering and
// both directions across the 32-bit wrap boundary.
func TestIsTimestampWrap(t *testing.T) {
	cases := []struct {
		name     string
		ts1, ts2 uint32
		later    bool
	}{
		{"normal case 1 timestamp later ", 2, 1, true},
		{"normal case 2 timestamp later", 0x1c000000, 0x10000000, true},
		{"wrap case timestamp later", 0xffff, 0xfc000000, true},
		{"wrap case timestamp early", 0xfc000000, 0xffff, false},
	}
	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			assert.Equal(t, c.later, IsLaterTimestamp(c.ts1, c.ts2))
		})
	}
}

10
pkg/sfu/buffer/errors.go Normal file
View File

@@ -0,0 +1,10 @@
package buffer
import "errors"
// Sentinel errors returned by the buffer package's packet cache
// operations.
var (
	errPacketNotFound = errors.New("packet not found in cache")
	errBufferTooSmall = errors.New("buffer too small")
	errPacketTooOld   = errors.New("received packet too old")
	errRTXPacket      = errors.New("packet already received")
)

97
pkg/sfu/buffer/factory.go Normal file
View File

@@ -0,0 +1,97 @@
package buffer
import (
"io"
"sync"
"github.com/go-logr/logr"
"github.com/pion/transport/packetio"
)
// Factory creates and tracks per-SSRC RTP buffers and RTCP readers,
// handing out an io.ReadWriteCloser per interceptor stream (see
// GetOrNew). Byte pools are shared across all buffers it creates.
type Factory struct {
	sync.RWMutex
	videoPool   *sync.Pool             // buffers sized for trackingPackets video packets
	audioPool   *sync.Pool             // buffers sized for 25 audio packets
	rtpBuffers  map[uint32]*Buffer     // keyed by SSRC
	rtcpReaders map[uint32]*RTCPReader // keyed by SSRC
	logger      logr.Logger
}
// NewBufferFactory constructs a Factory whose video pool allocates room
// for trackingPackets packets and whose audio pool holds 25 packets.
// An empty logr.Logger falls back to the package-level Logger; a
// non-empty one also replaces the package-level Logger used by
// free-standing functions in this package.
func NewBufferFactory(trackingPackets int, logger logr.Logger) *Factory {
	if logger == (logr.Logger{}) {
		logger = Logger
	} else {
		Logger = logger
	}

	videoPool := &sync.Pool{
		New: func() interface{} {
			buf := make([]byte, trackingPackets*maxPktSize)
			return &buf
		},
	}
	audioPool := &sync.Pool{
		New: func() interface{} {
			buf := make([]byte, maxPktSize*25)
			return &buf
		},
	}

	return &Factory{
		videoPool:   videoPool,
		audioPool:   audioPool,
		rtpBuffers:  make(map[uint32]*Buffer),
		rtcpReaders: make(map[uint32]*RTCPReader),
		logger:      logger,
	}
}
// GetOrNew returns the RTP buffer or RTCP reader registered for ssrc,
// creating and registering one when absent. Created instances remove
// themselves from the factory's map on Close. Returns nil for an
// unknown packet type.
func (f *Factory) GetOrNew(packetType packetio.BufferPacketType, ssrc uint32) io.ReadWriteCloser {
	f.Lock()
	defer f.Unlock()

	switch packetType {
	case packetio.RTCPBufferPacket:
		if existing, ok := f.rtcpReaders[ssrc]; ok {
			return existing
		}
		reader := NewRTCPReader(ssrc)
		reader.OnClose(func() {
			f.Lock()
			delete(f.rtcpReaders, ssrc)
			f.Unlock()
		})
		f.rtcpReaders[ssrc] = reader
		return reader

	case packetio.RTPBufferPacket:
		if existing, ok := f.rtpBuffers[ssrc]; ok {
			return existing
		}
		buffer := NewBuffer(ssrc, f.videoPool, f.audioPool, f.logger)
		buffer.OnClose(func() {
			f.Lock()
			delete(f.rtpBuffers, ssrc)
			f.Unlock()
		})
		f.rtpBuffers[ssrc] = buffer
		return buffer
	}
	return nil
}
// GetBufferPair returns the buffer and RTCP reader registered for ssrc
// (either may be nil if not registered).
func (f *Factory) GetBufferPair(ssrc uint32) (*Buffer, *RTCPReader) {
	f.RLock()
	defer f.RUnlock()
	return f.rtpBuffers[ssrc], f.rtcpReaders[ssrc]
}

// GetBuffer returns the RTP buffer registered for ssrc, or nil.
func (f *Factory) GetBuffer(ssrc uint32) *Buffer {
	f.RLock()
	defer f.RUnlock()
	return f.rtpBuffers[ssrc]
}

// GetRTCPReader returns the RTCP reader registered for ssrc, or nil.
func (f *Factory) GetRTCPReader(ssrc uint32) *RTCPReader {
	f.RLock()
	defer f.RUnlock()
	return f.rtcpReaders[ssrc]
}

297
pkg/sfu/buffer/helpers.go Normal file
View File

@@ -0,0 +1,297 @@
package buffer
import (
"encoding/binary"
"errors"
"sync/atomic"
)
// Errors returned by the payload parsing helpers in this package.
var (
	errShortPacket   = errors.New("packet is not large enough")
	errNilPacket     = errors.New("invalid nil packet")
	errInvalidPacket = errors.New("invalid packet")
)
// atomicBool is a boolean stored in an int32 so it can be read and
// written atomically (0 = false, non-zero = true). The zero value is
// false and ready to use.
type atomicBool int32

// set atomically stores value.
func (a *atomicBool) set(value bool) {
	var flag int32
	if value {
		flag = 1
	}
	atomic.StoreInt32((*int32)(a), flag)
}

// get atomically loads the current value.
func (a *atomicBool) get() bool {
	return atomic.LoadInt32((*int32)(a)) != 0
}
// VP8 is a helper to get temporal data from VP8 packet header
/*
	VP8 Payload Descriptor
			0 1 2 3 4 5 6 7                      0 1 2 3 4 5 6 7
			+-+-+-+-+-+-+-+-+                   +-+-+-+-+-+-+-+-+
			|X|R|N|S|R| PID | (REQUIRED)        |X|R|N|S|R| PID | (REQUIRED)
			+-+-+-+-+-+-+-+-+                   +-+-+-+-+-+-+-+-+
		X:  |I|L|T|K| RSV   | (OPTIONAL)   X:   |I|L|T|K| RSV   | (OPTIONAL)
			+-+-+-+-+-+-+-+-+                   +-+-+-+-+-+-+-+-+
		I:  |M| PictureID   | (OPTIONAL)   I:   |M| PictureID   | (OPTIONAL)
			+-+-+-+-+-+-+-+-+                   +-+-+-+-+-+-+-+-+
		L:  |   TL0PICIDX   | (OPTIONAL)        |   PictureID   |
			+-+-+-+-+-+-+-+-+                   +-+-+-+-+-+-+-+-+
		T/K:|TID|Y| KEYIDX  | (OPTIONAL)   L:   |   TL0PICIDX   | (OPTIONAL)
			+-+-+-+-+-+-+-+-+                   +-+-+-+-+-+-+-+-+
		T/K:                                    |TID|Y| KEYIDX  | (OPTIONAL)
			+-+-+-+-+-+-+-+-+
*/
type VP8 struct {
	TemporalSupported bool // true when the T bit is set (temporal layer info present) // LK-TODO: CLEANUP-REMOVE
	// FirstByte is the raw first byte of the payload descriptor.
	FirstByte byte

	PictureIDPresent int    // 1 when the I bit is set
	PictureID        uint16 /* 8 or 16 bits, picture ID */
	PicIDIdx         int    // byte offset of PictureID within the payload // LK-TODO: CLEANUP-REMOVE
	MBit             bool   // true when PictureID is 15 bits (two bytes)

	TL0PICIDXPresent int   // 1 when the L bit is set
	TL0PICIDX        uint8 /* 8 bits temporal level zero index */
	TlzIdx           int   // byte offset of TL0PICIDX within the payload // LK-TODO: CLEANUP-REMOVE

	// Optional Header If either of the T or K bits are set to 1,
	// the TID/Y/KEYIDX extension field MUST be present.
	TIDPresent    int   // 1 when the T bit is set
	TID           uint8 /* 2 bits temporal layer idx */
	Y             uint8 // layer sync bit
	KEYIDXPresent int   // 1 when the K bit is set
	KEYIDX        uint8 /* 5 bits of key frame idx */

	// HeaderSize is the descriptor length in bytes (offset of the VP8
	// payload proper).
	HeaderSize int

	// IsKeyFrame is a helper to detect if current packet is a keyframe
	IsKeyFrame bool
}
// Unmarshal parses the VP8 payload descriptor from payload into p,
// validating length at every step. Returns errNilPacket for a nil
// slice, errShortPacket when a required byte is missing, and
// errInvalidPacket for an illegal flag combination.
func (p *VP8) Unmarshal(payload []byte) error {
	if payload == nil {
		return errNilPacket
	}

	payloadLen := len(payload)

	if payloadLen < 1 {
		return errShortPacket
	}

	idx := 0
	p.FirstByte = payload[idx]
	S := payload[idx]&0x10 > 0 // S bit: start of VP8 partition
	// Check for extended bit control (X bit)
	if payload[idx]&0x80 > 0 {
		idx++
		if payloadLen < idx+1 {
			return errShortPacket
		}
		I := payload[idx]&0x80 > 0
		L := payload[idx]&0x40 > 0
		T := payload[idx]&0x20 > 0
		K := payload[idx]&0x10 > 0
		if L && !T {
			// TL0PICIDX without the TID/Y/KEYIDX byte is not a valid combination
			return errInvalidPacket
		}
		// Check if T is present, if not, no temporal layer is available
		p.TemporalSupported = T
		// Check for PictureID (7 bits, or 15 bits when the M bit is set)
		if I {
			idx++
			if payloadLen < idx+1 {
				return errShortPacket
			}
			p.PicIDIdx = idx
			p.PictureIDPresent = 1
			pid := payload[idx] & 0x7f
			// Check if m is 1, then Picture ID is 15 bits
			if payload[idx]&0x80 > 0 {
				idx++
				if payloadLen < idx+1 {
					return errShortPacket
				}
				p.MBit = true
				p.PictureID = binary.BigEndian.Uint16([]byte{pid, payload[idx]})
			} else {
				p.PictureID = uint16(pid)
			}
		}
		// Check if TL0PICIDX is present
		if L {
			idx++
			if payloadLen < idx+1 {
				return errShortPacket
			}
			p.TlzIdx = idx
			p.TL0PICIDXPresent = 1
			// (A second `idx >= payloadLen` re-check previously lived here;
			// it was unreachable because the bounds check above already
			// guarantees idx < payloadLen.)
			p.TL0PICIDX = payload[idx]
		}
		// TID/Y/KEYIDX byte is present whenever T or K is set
		if T || K {
			idx++
			if payloadLen < idx+1 {
				return errShortPacket
			}
			if T {
				p.TIDPresent = 1
				p.TID = (payload[idx] & 0xc0) >> 6
				p.Y = (payload[idx] & 0x20) >> 5
			}
			if K {
				p.KEYIDXPresent = 1
				p.KEYIDX = payload[idx] & 0x1f
			}
		}
		if idx >= payloadLen {
			return errShortPacket
		}
		idx++
		if payloadLen < idx+1 {
			return errShortPacket
		}
		// Check is packet is a keyframe by looking at P bit in vp8 payload
		p.IsKeyFrame = payload[idx]&0x01 == 0 && S
	} else {
		idx++
		if payloadLen < idx+1 {
			return errShortPacket
		}
		// Check is packet is a keyframe by looking at P bit in vp8 payload
		p.IsKeyFrame = payload[idx]&0x01 == 0 && S
	}
	p.HeaderSize = idx
	return nil
}
// MarshalTo writes the VP8 payload descriptor represented by v into
// buf, which must hold at least v.HeaderSize bytes. The X bit of the
// first byte is forced to match whether any optional extension fields
// are present. Only the descriptor is written, never the VP8 frame data.
func (v *VP8) MarshalTo(buf []byte) error {
	if len(buf) < v.HeaderSize {
		return errShortPacket
	}
	idx := 0
	buf[idx] = v.FirstByte
	if (v.PictureIDPresent + v.TL0PICIDXPresent + v.TIDPresent + v.KEYIDXPresent) != 0 {
		buf[idx] |= 0x80 // X bit
		idx++
		// extension control byte: I|L|T|K flags
		buf[idx] = byte(v.PictureIDPresent<<7) | byte(v.TL0PICIDXPresent<<6) | byte(v.TIDPresent<<5) | byte(v.KEYIDXPresent<<4)
		idx++
		if v.PictureIDPresent == 1 {
			if v.MBit {
				// 15-bit PictureID: M bit plus two bytes, big endian
				buf[idx] = 0x80 | byte((v.PictureID>>8)&0x7f)
				buf[idx+1] = byte(v.PictureID & 0xff)
				idx += 2
			} else {
				// 7-bit PictureID in a single byte
				buf[idx] = byte(v.PictureID)
				idx++
			}
		}
		if v.TL0PICIDXPresent == 1 {
			buf[idx] = byte(v.TL0PICIDX)
			idx++
		}
		if v.TIDPresent == 1 || v.KEYIDXPresent == 1 {
			buf[idx] = 0
			if v.TIDPresent == 1 {
				buf[idx] = byte(v.TID<<6) | byte(v.Y<<5)
			}
			if v.KEYIDXPresent == 1 {
				buf[idx] |= byte(v.KEYIDX & 0x1f)
			}
			idx++
		}
	} else {
		buf[idx] &^= 0x80 // X bit
		idx++
	}
	return nil
}
// VP8PictureIdSizeDiff returns the change in PictureID field size, in
// bytes, when the M bit goes from mBit1 to mBit2: +1 when the field
// shrinks from 15-bit to 7-bit form, -1 when it grows, 0 when unchanged.
func VP8PictureIdSizeDiff(mBit1 bool, mBit2 bool) int {
	switch {
	case mBit1 == mBit2:
		return 0
	case mBit1:
		return 1
	default:
		return -1
	}
}
// isH264Keyframe detects if h264 payload is a keyframe
// this code was taken from https://github.com/jech/galene/blob/codecs/rtpconn/rtpreader.go#L45
// all credits belongs to Juliusz Chroboczek @jech and the awesome Galene SFU
//
// A packet counts as a keyframe when it carries (or starts) an IDR NALU
// (type 5) or, inside an aggregation packet, an SPS NALU (type 7).
func isH264Keyframe(payload []byte) bool {
	if len(payload) < 1 {
		return false
	}
	nalu := payload[0] & 0x1F
	if nalu == 0 {
		// reserved
		return false
	} else if nalu <= 23 {
		// simple NALU
		return nalu == 5
	} else if nalu == 24 || nalu == 25 || nalu == 26 || nalu == 27 {
		// STAP-A, STAP-B, MTAP16 or MTAP24
		i := 1
		if nalu == 25 || nalu == 26 || nalu == 27 {
			// skip DON
			i += 2
		}
		// walk each aggregated (length-prefixed) NALU
		for i < len(payload) {
			if i+2 > len(payload) {
				return false
			}
			length := uint16(payload[i])<<8 |
				uint16(payload[i+1])
			i += 2
			if i+int(length) > len(payload) {
				return false
			}
			// MTAP units carry an extra timestamp offset before the NALU
			offset := 0
			if nalu == 26 {
				offset = 3
			} else if nalu == 27 {
				offset = 4
			}
			if offset >= int(length) {
				return false
			}
			n := payload[i+offset] & 0x1F
			if n == 7 {
				// SPS implies a keyframe follows
				return true
			} else if n >= 24 {
				// is this legal?
				Logger.V(0).Info("Non-simple NALU within a STAP")
			}
			i += int(length)
		}
		if i == len(payload) {
			return false
		}
		return false
	} else if nalu == 28 || nalu == 29 {
		// FU-A or FU-B
		if len(payload) < 2 {
			return false
		}
		if (payload[1] & 0x80) == 0 {
			// not a starting fragment
			return false
		}
		// starting fragment: keyframe when the fragmented NALU is an IDR slice
		return payload[1]&0x1F == 7
	}
	return false
}

View File

@@ -0,0 +1,94 @@
package buffer
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestVP8Helper_Unmarshal drives VP8.Unmarshal with hand-crafted
// descriptor byte vectors covering the X/I/M/L/T/K flag combinations.
func TestVP8Helper_Unmarshal(t *testing.T) {
	type args struct {
		payload []byte
	}
	tests := []struct {
		name            string
		args            args
		wantErr         bool
		checkTemporal   bool
		temporalSupport bool
		checkKeyFrame   bool
		keyFrame        bool
		checkPictureID  bool
		pictureID       uint16
		checkTlzIdx     bool
		tlzIdx          uint8
		checkTempID     bool
		temporalID      uint8
	}{
		{
			name:    "Empty or nil payload must return error",
			args:    args{payload: []byte{}},
			wantErr: true,
		},
		{
			name:            "Temporal must be supported by setting T bit to 1",
			args:            args{payload: []byte{0xff, 0x20, 0x1, 0x2, 0x3, 0x4}},
			checkTemporal:   true,
			temporalSupport: true,
		},
		{
			name:           "Picture must be ID 7 bits by setting M bit to 0 and present by I bit set to 1",
			args:           args{payload: []byte{0xff, 0xff, 0x11, 0x2, 0x3, 0x4}},
			checkPictureID: true,
			pictureID:      17,
		},
		{
			name:           "Picture ID must be 15 bits by setting M bit to 1 and present by I bit set to 1",
			args:           args{payload: []byte{0xff, 0xff, 0x92, 0x67, 0x3, 0x4, 0x5}},
			checkPictureID: true,
			pictureID:      4711,
		},
		{
			name:        "Temporal level zero index must be present if L set to 1",
			args:        args{payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x4, 0x5}},
			checkTlzIdx: true,
			tlzIdx:      180,
		},
		{
			name:        "Temporal index must be present and used if T bit set to 1",
			args:        args{payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x5, 0x6}},
			checkTempID: true,
			temporalID:  2,
		},
		{
			name:          "Check if packet is a keyframe by looking at P bit set to 0",
			args:          args{payload: []byte{0xff, 0xff, 0xff, 0xfd, 0xb4, 0x9f, 0x94, 0x1}},
			checkKeyFrame: true,
			keyFrame:      true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			p := &VP8{}
			if err := p.Unmarshal(tt.args.payload); (err != nil) != tt.wantErr {
				t.Errorf("Unmarshal() error = %v, wantErr %v", err, tt.wantErr)
			}
			// only check the fields each vector was designed to exercise
			if tt.checkTemporal {
				assert.Equal(t, tt.temporalSupport, p.TemporalSupported)
			}
			if tt.checkKeyFrame {
				assert.Equal(t, tt.keyFrame, p.IsKeyFrame)
			}
			if tt.checkPictureID {
				assert.Equal(t, tt.pictureID, p.PictureID)
			}
			if tt.checkTlzIdx {
				assert.Equal(t, tt.tlzIdx, p.TL0PICIDX)
			}
			if tt.checkTempID {
				assert.Equal(t, tt.temporalID, p.TID)
			}
		})
	}
}

106
pkg/sfu/buffer/nack.go Normal file
View File

@@ -0,0 +1,106 @@
package buffer
import (
"sort"
"github.com/pion/rtcp"
)
const maxNackTimes = 3   // Max number of times a packet will be NACKed
const maxNackCache = 100 // Max NACK sn the sfu will keep reference

// nack tracks one missing packet: its extended sequence number and how
// many times it has been NACKed so far.
type nack struct {
	sn     uint32
	nacked uint8
}

// nackQueue keeps the missing extended sequence numbers sorted
// ascending. It is not safe for concurrent use.
type nackQueue struct {
	nacks []nack
	kfSN  uint32 // highest sn for which a keyframe was already requested
}

// newNACKQueue returns an empty queue with room for maxNackCache entries.
func newNACKQueue() *nackQueue {
	return &nackQueue{
		nacks: make([]nack, 0, maxNackCache+1),
	}
}

// remove drops extSN from the queue (no-op if absent); called when the
// missing packet finally arrives.
func (n *nackQueue) remove(extSN uint32) {
	i := sort.Search(len(n.nacks), func(i int) bool { return n.nacks[i].sn >= extSN })
	if i >= len(n.nacks) || n.nacks[i].sn != extSN {
		return
	}
	copy(n.nacks[i:], n.nacks[i+1:])
	n.nacks = n.nacks[:len(n.nacks)-1]
}

// push inserts extSN keeping the queue sorted; duplicates are ignored.
// When the queue reaches maxNackCache the oldest entry is evicted.
func (n *nackQueue) push(extSN uint32) {
	i := sort.Search(len(n.nacks), func(i int) bool { return n.nacks[i].sn >= extSN })
	if i < len(n.nacks) && n.nacks[i].sn == extSN {
		return
	}

	nck := nack{
		sn:     extSN,
		nacked: 0,
	}
	if i == len(n.nacks) {
		n.nacks = append(n.nacks, nck)
	} else {
		// shift-right insert: duplicate element i, then overwrite it
		n.nacks = append(n.nacks[:i+1], n.nacks[i:]...)
		n.nacks[i] = nck
	}

	if len(n.nacks) >= maxNackCache {
		// Evict the oldest entry. The previous code only shifted the
		// elements left without shrinking the slice, which duplicated
		// the last entry and let the queue grow past maxNackCache.
		copy(n.nacks, n.nacks[1:])
		n.nacks = n.nacks[:len(n.nacks)-1]
	}
}
// pairs converts the queued misses into RTCP NACK pairs relative to
// headSN (the newest extended sequence number received), compacting the
// queue in place as it goes. Entries NACKed maxNackTimes times are
// dropped; giving up on a new highest sn sets the second return value
// to request a keyframe. Entries within 2 of headSN are kept but not
// NACKed yet.
// NOTE(review): headSN-2 underflows for headSN < 2; callers presumably
// pass extended sequence numbers well above zero — confirm.
func (n *nackQueue) pairs(headSN uint32) ([]rtcp.NackPair, bool) {
	if len(n.nacks) == 0 {
		return nil, false
	}
	i := 0 // write index for the in-place compaction
	askKF := false
	var np rtcp.NackPair
	var nps []rtcp.NackPair
	lostIdx := -1
	for _, nck := range n.nacks {
		if nck.nacked >= maxNackTimes {
			// retries exhausted: drop and maybe ask for a keyframe
			if nck.sn > n.kfSN {
				n.kfSN = nck.sn
				askKF = true
			}
			continue
		}
		if nck.sn >= headSN-2 {
			// too recent to NACK yet; keep without counting a retry
			n.nacks[i] = nck
			i++
			continue
		}
		// keep the entry with its retry count incremented
		n.nacks[i] = nack{
			sn:     nck.sn,
			nacked: nck.nacked + 1,
		}
		i++
		// first nackpair or need a new nackpair (a pair covers PacketID
		// plus a 16-bit bitmask of the following 16 sequence numbers)
		if lostIdx < 0 || nck.sn > n.nacks[lostIdx].sn+16 {
			if lostIdx >= 0 {
				nps = append(nps, np)
			}
			np.PacketID = uint16(nck.sn)
			np.LostPackets = 0
			lostIdx = i - 1
			continue
		}
		np.LostPackets |= 1 << ((nck.sn) - n.nacks[lostIdx].sn - 1)
	}
	// append last nackpair
	if lostIdx != -1 {
		nps = append(nps, np)
	}
	n.nacks = n.nacks[:i]
	return nps, askKF
}

196
pkg/sfu/buffer/nack_test.go Normal file
View File

@@ -0,0 +1,196 @@
package buffer
import (
"math/rand"
"reflect"
"testing"
"time"
"github.com/pion/rtcp"
"github.com/stretchr/testify/assert"
)
// Test_nackQueue_pairs checks the grouping of queued misses into RTCP
// NACK pairs (PacketID + LostPackets bitmask), including entries that
// straddle the 16-bit wrap in extended sequence space.
func Test_nackQueue_pairs(t *testing.T) {
	type fields struct {
		nacks []nack
	}
	tests := []struct {
		name   string
		fields fields
		args   []uint32
		want   []rtcp.NackPair
	}{
		{
			name: "Must return correct single pairs pair",
			fields: fields{
				nacks: nil,
			},
			args: []uint32{1, 2, 4, 5},
			want: []rtcp.NackPair{{
				PacketID:    1,
				LostPackets: 13, // bits for +1, +3, +4 relative to PacketID
			}},
		},
		{
			name: "Must return correct pair wrap",
			fields: fields{
				nacks: nil,
			},
			args: []uint32{65536, 65538, 65540, 65541, 65566, 65568}, // wrap around 65533,2,4,5
			want: []rtcp.NackPair{{
				PacketID:    0, // 65536
				LostPackets: 1<<4 + 1<<3 + 1<<1,
			},
				{
					PacketID:    30, // 65566
					LostPackets: 1 << 1,
				}},
		},
		{
			name: "Must return 2 pairs pair",
			fields: fields{
				nacks: nil,
			},
			args: []uint32{1, 2, 4, 5, 20, 22, 24, 27},
			want: []rtcp.NackPair{
				{
					PacketID:    1,
					LostPackets: 13,
				},
				{
					PacketID:    20,
					LostPackets: 74, // bits for +2, +4, +7
				},
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			n := &nackQueue{
				nacks: tt.fields.nacks,
			}
			for _, sn := range tt.args {
				n.push(sn)
			}
			// headSN far ahead so every entry is old enough to NACK
			got, _ := n.pairs(75530)
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("pairs() = %v, want %v", got, tt.want)
			}
		})
	}
}
// Test_nackQueue_push verifies that push keeps entries sorted and
// deduplicated regardless of insertion order.
func Test_nackQueue_push(t *testing.T) {
	t.Run("Must keep packet order", func(t *testing.T) {
		q := &nackQueue{
			nacks: make([]nack, 0, 10),
		}
		// note: 5 appears twice and must only be stored once
		for _, sn := range []uint32{3, 4, 1, 5, 8, 7, 5} {
			q.push(sn)
		}
		var got []uint32
		for _, entry := range q.nacks {
			got = append(got, entry.sn)
		}
		assert.Equal(t, []uint32{1, 3, 4, 5, 7, 8}, got)
	})
}
// Test_nackQueue fuzzes push/remove/pairs with random sequence numbers
// to make sure no combination panics or corrupts the queue indices.
func Test_nackQueue(t *testing.T) {
	t.Run("Must keep packet order", func(t *testing.T) {
		q := nackQueue{}
		rng := rand.New(rand.NewSource(time.Now().UnixNano()))
		for round := 0; round < 100; round++ {
			assert.NotPanics(t, func() {
				q.push(uint32(rng.Intn(60000)))
				q.remove(uint32(rng.Intn(60000)))
				q.pairs(60001)
			})
		}
	})
}
// Test_nackQueue_remove verifies that removing a present sequence
// number deletes exactly that entry while preserving order.
func Test_nackQueue_remove(t *testing.T) {
	t.Run("Must keep packet order", func(t *testing.T) {
		q := nackQueue{}
		for _, sn := range []uint32{3, 4, 1, 5, 8, 7, 5} {
			q.push(sn)
		}
		q.remove(5)
		var got []uint32
		for _, entry := range q.nacks {
			got = append(got, entry.sn)
		}
		assert.Equal(t, []uint32{1, 3, 4, 7, 8}, got)
	})
}

View File

@@ -0,0 +1,44 @@
package buffer
import (
"io"
"sync/atomic"
)
// RTCPReader is the per-SSRC sink incoming RTCP packets are written
// into. Consumers register an OnPacket callback instead of calling
// Read, which is a stub.
type RTCPReader struct {
	ssrc     uint32
	closed   atomicBool
	onPacket atomic.Value // func([]byte)
	onClose  func()
}

// NewRTCPReader returns a reader bound to the given SSRC.
func NewRTCPReader(ssrc uint32) *RTCPReader {
	return &RTCPReader{ssrc: ssrc}
}
// Write delivers an incoming RTCP packet to the registered OnPacket
// callback. Returns io.EOF once the reader has been closed; the byte
// count is always zero.
func (r *RTCPReader) Write(p []byte) (n int, err error) {
	if r.closed.get() {
		return 0, io.EOF
	}
	if cb, ok := r.onPacket.Load().(func([]byte)); ok {
		cb(p)
	}
	return 0, nil
}
// OnClose registers the callback invoked when the reader is closed.
// NOTE(review): plain field write; register before Close can be called.
func (r *RTCPReader) OnClose(fn func()) {
	r.onClose = fn
}
// Close marks the reader closed (subsequent Writes return io.EOF) and
// fires the close callback if one was registered. The nil guard keeps
// Close safe even when OnClose was never called.
func (r *RTCPReader) Close() error {
	r.closed.set(true)
	if r.onClose != nil {
		r.onClose()
	}
	return nil
}
// OnPacket registers the callback invoked with each written RTCP
// packet; stored in an atomic.Value so it is safe to swap while Write
// is running.
func (r *RTCPReader) OnPacket(f func([]byte)) {
	r.onPacket.Store(f)
}

// Read is a stub to satisfy io.ReadWriteCloser; it always returns 0, nil.
func (r *RTCPReader) Read(_ []byte) (n int, err error) { return }

81
pkg/sfu/datachannel.go Normal file
View File

@@ -0,0 +1,81 @@
package sfu
import (
"context"
"github.com/pion/webrtc/v3"
)
type (
	// Datachannel is a wrapper to define middlewares executed on defined label.
	// The datachannels created will be negotiated on join to all peers that joins
	// the SFU.
	Datachannel struct {
		Label       string
		middlewares []func(MessageProcessor) MessageProcessor
		onMessage   func(ctx context.Context, args ProcessArgs)
	}

	// ProcessArgs carries one datachannel message together with the peer
	// and channel it arrived on.
	ProcessArgs struct {
		Peer        Peer
		Message     webrtc.DataChannelMessage
		DataChannel *webrtc.DataChannel
	}

	// Middlewares is an ordered list of middleware constructors.
	Middlewares []func(MessageProcessor) MessageProcessor

	// MessageProcessor handles one datachannel message.
	MessageProcessor interface {
		Process(ctx context.Context, args ProcessArgs)
	}

	// ProcessFunc adapts a plain function to MessageProcessor.
	ProcessFunc func(ctx context.Context, args ProcessArgs)

	// chainHandler caches the fully-composed middleware chain (current)
	// alongside its inputs.
	chainHandler struct {
		middlewares Middlewares
		Last        MessageProcessor
		current     MessageProcessor
	}
)
// Use adds the middlewares to the current Datachannel.
// The middlewares are going to be executed before the OnMessage event fires.
func (dc *Datachannel) Use(middlewares ...func(MessageProcessor) MessageProcessor) {
	dc.middlewares = append(dc.middlewares, middlewares...)
}
// OnMessage sets the message callback for the datachannel, the event is fired
// after all the middlewares have processed the message.
func (dc *Datachannel) OnMessage(fn func(ctx context.Context, args ProcessArgs)) {
	dc.onMessage = fn
}
// Process invokes the wrapped function, satisfying MessageProcessor.
func (p ProcessFunc) Process(ctx context.Context, args ProcessArgs) {
	p(ctx, args)
}

// Process wraps h with the middleware chain, precomputing the composed
// handler.
func (mws Middlewares) Process(h MessageProcessor) MessageProcessor {
	return &chainHandler{mws, h, chain(mws, h)}
}

// ProcessFunc is identical to Process.
// NOTE(review): duplicate of Process, presumably kept for API
// compatibility — confirm before removing either.
func (mws Middlewares) ProcessFunc(h MessageProcessor) MessageProcessor {
	return &chainHandler{mws, h, chain(mws, h)}
}

// newDCChain wraps a middleware slice in the Middlewares helper type.
func newDCChain(m []func(p MessageProcessor) MessageProcessor) Middlewares {
	return Middlewares(m)
}
// Process runs the message through the precomputed middleware chain.
func (c *chainHandler) Process(ctx context.Context, args ProcessArgs) {
	c.current.Process(ctx, args)
}
// chain folds the middleware slice around last so that mws[0] runs
// outermost (first) and last runs innermost. With no middlewares it
// returns last unchanged.
func chain(mws []func(processor MessageProcessor) MessageProcessor, last MessageProcessor) MessageProcessor {
	handler := last
	for i := len(mws) - 1; i >= 0; i-- {
		handler = mws[i](handler)
	}
	return handler
}

1668
pkg/sfu/downtrack.go Normal file

File diff suppressed because it is too large Load Diff

18
pkg/sfu/errors.go Normal file
View File

@@ -0,0 +1,18 @@
package sfu
import "errors"
var (
	// PeerLocal erors
	errPeerConnectionInitFailed = errors.New("pc init failed")
	errCreatingDataChannel      = errors.New("failed to create data channel")

	// router errors
	errNoReceiverFound = errors.New("no receiver found")

	// Helpers errors
	errShortPacket = errors.New("packet is not large enough")
	errNilPacket   = errors.New("invalid nil packet")

	// Exported spatial-layer errors, matched by callers with errors.Is.
	ErrSpatialNotSupported  = errors.New("current track does not support simulcast/SVC")
	ErrSpatialLayerNotFound = errors.New("the requested layer does not exist")
	ErrSpatialLayerBusy     = errors.New("a spatial layer change is in progress, try later")
)

131
pkg/sfu/helpers.go Normal file
View File

@@ -0,0 +1,131 @@
package sfu
import (
"encoding/binary"
"strings"
"time"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/pion/webrtc/v3"
)
var (
	// ntpEpoch is the NTP era origin (1900-01-01 UTC); NTP timestamps
	// count from here rather than from the Unix epoch.
	ntpEpoch = time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC)
)

// ntpTime is a 64-bit NTP timestamp: seconds since ntpEpoch in the high
// 32 bits, fractional seconds in units of 1/2^32 in the low 32 bits.
type ntpTime uint64
// LK-TODO: CLEANUP-REMOVE-FUNCTION
// setVp8TemporalLayer is a helper to detect and modify accordingly the vp8 payload to reflect
// temporal changes in the SFU.
// VP8 temporal layers implemented according https://tools.ietf.org/html/rfc7741
//
// Returns the rewritten payload buffer, the remapped picture ID and
// TL0PICIDX, and drop=true when the packet's temporal layer is above
// the currently selected one and must not be forwarded.
func setVP8TemporalLayer(p *buffer.ExtPacket, d *DownTrack) (buf []byte, picID uint16, tlz0Idx uint8, drop bool) {
	pkt, ok := p.Payload.(buffer.VP8)
	if !ok {
		return
	}

	// temporalLayer packs current layer (low 16 bits) and target layer
	// (high 16 bits) into one int32
	layer := d.temporalLayer.get()
	currentLayer := uint16(layer)
	currentTargetLayer := uint16(layer >> 16)
	// Check if temporal getLayer is requested
	if currentTargetLayer != currentLayer {
		// switch down to the target once a packet at or below it arrives
		if pkt.TID <= uint8(currentTargetLayer) {
			d.temporalLayer.set(int32(currentTargetLayer)<<16 | int32(currentTargetLayer))
		}
	} else if pkt.TID > uint8(currentLayer) {
		// above the selected layer: drop
		drop = true
		return
	}

	// copy the payload into the downtrack's scratch buffer before mutating
	buf = *d.payload
	buf = buf[:len(p.Packet.Payload)]
	copy(buf, p.Packet.Payload)

	// remap picture ID / TL0PICIDX from the source sequence into the
	// downstream continuous sequence
	picID = pkt.PictureID - d.simulcast.refPicID.get() + d.simulcast.pRefPicID.get() + 1
	tlz0Idx = pkt.TL0PICIDX - d.simulcast.refTlZIdx.get() + d.simulcast.pRefTlZIdx.get() + 1

	if p.Head {
		d.simulcast.lPicID.set(picID)
		d.simulcast.lTlZIdx.set(tlz0Idx)
	}

	modifyVP8TemporalPayload(buf, pkt.PicIDIdx, pkt.TlzIdx, picID, tlz0Idx, pkt.MBit)

	return
}
// LK-TODO: CLEANUP-REMOVE-FUNCTION
// modifyVP8TemporalPayload rewrites the PictureID (at picIDIdx) and
// TL0PICIDX (at tlz0Idx) fields of a VP8 payload descriptor in place.
// With mBit set the picture ID occupies two bytes (M bit + 15 bits,
// big endian); otherwise it is a single byte holding the low 7 bits.
// The single-byte form previously wrote the HIGH byte of picID (zero
// for any id < 256), clobbering the field; it now writes the low 7
// bits, matching VP8.MarshalTo in the buffer package.
func modifyVP8TemporalPayload(payload []byte, picIDIdx, tlz0Idx int, picID uint16, tlz0ID uint8, mBit bool) {
	pid := make([]byte, 2)
	binary.BigEndian.PutUint16(pid, picID)
	if mBit {
		payload[picIDIdx] = 0x80 | (pid[0] & 0x7f)
		payload[picIDIdx+1] = pid[1]
	} else {
		payload[picIDIdx] = pid[1] & 0x7f
	}
	payload[tlz0Idx] = tlz0ID
}
// codecParametersFuzzySearch finds the codec in haystack that matches
// needle, preferring an exact MimeType+SDPFmtpLine match and falling
// back to the first MimeType-only match. Returns
// webrtc.ErrCodecNotFound when neither exists.
func codecParametersFuzzySearch(needle webrtc.RTPCodecParameters, haystack []webrtc.RTPCodecParameters) (webrtc.RTPCodecParameters, error) {
	fallback := -1
	for i, c := range haystack {
		if !strings.EqualFold(c.RTPCodecCapability.MimeType, needle.RTPCodecCapability.MimeType) {
			continue
		}
		if c.RTPCodecCapability.SDPFmtpLine == needle.RTPCodecCapability.SDPFmtpLine {
			// exact match wins immediately
			return c, nil
		}
		if fallback < 0 {
			fallback = i
		}
	}
	if fallback >= 0 {
		return haystack[fallback], nil
	}
	return webrtc.RTPCodecParameters{}, webrtc.ErrCodecNotFound
}
// ntpToMillisSinceEpoch converts a 64-bit NTP timestamp to milliseconds since
// the NTP epoch. The upper 32 bits carry whole seconds and the lower 32 bits
// carry the fractional second in units of 1/2^32 s; both are scaled to ms.
func ntpToMillisSinceEpoch(ntp uint64) uint64 {
	seconds := ntp >> 32
	fraction := ntp & 0xFFFFFFFF
	return seconds*1000 + (fraction*1000)>>32
}
// fastForwardTimestampAmount returns how far newestTimestamp is ahead of
// referenceTimestamp, accounting for 32-bit RTP timestamp wrap-around.
// If the newest timestamp is behind the reference (and not wrapped), the
// amount is clamped to zero.
func fastForwardTimestampAmount(newestTimestamp uint32, referenceTimestamp uint32) uint32 {
	if buffer.IsTimestampWrapAround(newestTimestamp, referenceTimestamp) {
		// newest has wrapped past 2^32; widen to 64-bit to compute the gap
		return uint32(uint64(newestTimestamp) + (1 << 32) - uint64(referenceTimestamp))
	}
	if newestTimestamp >= referenceTimestamp {
		return newestTimestamp - referenceTimestamp
	}
	return 0
}
// Duration converts the NTP timestamp into a time.Duration measured from the
// NTP epoch, rounding the 1/2^32 s fraction to the nearest nanosecond.
func (t ntpTime) Duration() time.Duration {
	wholeNanos := (t >> 32) * 1e9
	scaledFrac := (t & 0xffffffff) * 1e9
	fracNanos := scaledFrac >> 32
	// round half up based on the most significant discarded bit
	if scaledFrac&0x80000000 != 0 {
		fracNanos++
	}
	return time.Duration(wholeNanos + fracNanos)
}
// Time converts the NTP timestamp to an absolute time.Time by adding its
// duration to the package-level ntpEpoch reference.
func (t ntpTime) Time() time.Time {
	return ntpEpoch.Add(t.Duration())
}
// toNtpTime converts a time.Time into a 64-bit NTP timestamp relative to
// ntpEpoch: whole seconds in the high 32 bits and the fractional second in
// units of 1/2^32 s in the low 32 bits, rounded to nearest.
func toNtpTime(t time.Time) ntpTime {
	totalNanos := uint64(t.Sub(ntpEpoch))
	seconds := totalNanos / 1e9
	scaled := (totalNanos % 1e9) << 32
	fraction := scaled / 1e9
	// round half up on the truncated remainder
	if scaled%1e9 >= 1e9/2 {
		fraction++
	}
	return ntpTime(seconds<<32 | fraction)
}

34
pkg/sfu/helpers_test.go Normal file
View File

@@ -0,0 +1,34 @@
package sfu
import (
"testing"
"time"
)
// Test_timeToNtp verifies toNtpTime against a precomputed NTP value.
func Test_timeToNtp(t *testing.T) {
	cases := []struct {
		name string
		in   time.Time
		want uint64
	}{
		{
			name: "Must return correct NTP time",
			in:   time.Unix(1602391458, 1234),
			want: 16369753560730047668,
		},
	}
	for _, c := range cases {
		c := c
		t.Run(c.name, func(t *testing.T) {
			if got := uint64(toNtpTime(c.in)); got != c.want {
				t.Errorf("timeToNtp() gotFraction = %v, want %v", got, c.want)
			}
		})
	}
}

133
pkg/sfu/logger/zerologr.go Normal file
View File

@@ -0,0 +1,133 @@
// Copyright 2019 Jorn Friedrich Dreyer
// Modified 2021 Serhii Mikhno
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package logger defines a default implementation of the github.com/go-logr/logr
// interfaces built on top of zerolog (github.com/rs/zerolog) and is the default
// implementation for ion-sfu released binaries.
// This package separates log level into two different concepts:
// - V-Level - verbosity level, number, that every logger has.
// A higher value means more logs will be written.
// - Log-level - usual log level (TRACE|DEBUG|INFO).
// Every log row combines those two values.
// You can set log level to TRACE and see all general traces.
// To see more logs just add -v
package logger
import (
"fmt"
"io"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/go-logr/logr"
"github.com/go-logr/zerologr"
"github.com/rs/zerolog"
)
const (
timeFormat = "2006-01-02 15:04:05.000"
)
// GlobalConfig config contains global options
type GlobalConfig struct {
	// V is the verbosity level; higher values enable more verbose output.
	V int `mapstructure:"v"`
}
// SetGlobalOptions sets the global options, like level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. Concurrent-safe.
func SetGlobalOptions(config GlobalConfig) {
	// map verbosity to a zerolog level and clamp into [TraceLevel, InfoLevel]
	lvl := 1 - config.V
	switch {
	case lvl < int(zerolog.TraceLevel):
		lvl = int(zerolog.TraceLevel)
	case lvl > int(zerolog.InfoLevel):
		lvl = int(zerolog.InfoLevel)
	}
	zerolog.SetGlobalLevel(zerolog.Level(lvl))
}
// SetVLevelByStringGlobal does the same as SetGlobalOptions but
// trying to expose verbosity level as more familiar "word-based" log levels
// (e.g. "trace", "debug", "info"). Strings zerolog cannot parse are ignored.
func SetVLevelByStringGlobal(level string) {
	if v, err := zerolog.ParseLevel(level); err == nil {
		zerolog.SetGlobalLevel(v)
	}
}
// Options that can be passed to NewWithOptions
type Options struct {
	// Name is an optional name of the logger
	Name string
	// TimeFormat overrides the global zerolog time field format when non-empty.
	TimeFormat string
	// Output is the log destination; a console writer on stdout is used when nil.
	Output io.Writer
	// Logger is an instance of zerolog, if nil a default logger is used
	Logger *zerolog.Logger
}
// New returns a logr.Logger, LogSink is implemented by zerolog.
// It is equivalent to NewWithOptions with all defaults.
func New() logr.Logger {
	return NewWithOptions(Options{})
}
// NewWithOptions returns a logr.Logger, LogSink is implemented by zerolog.
// Zero-valued fields of opts fall back to defaults: the package time format,
// a console writer on stdout, and a fresh timestamped zerolog logger.
func NewWithOptions(opts Options) logr.Logger {
	format := opts.TimeFormat
	if format == "" {
		format = timeFormat
	}
	zerolog.TimeFieldFormat = format

	out := opts.Output
	if out == nil {
		out = getOutputFormat()
	}

	logger := opts.Logger
	if logger == nil {
		zl := zerolog.New(out).With().Timestamp().Logger()
		logger = &zl
	}

	sink := zerologr.NewLogSink(logger)
	if zerolog.LevelFieldName == "" {
		// Restore field removed by Zerologr
		zerolog.LevelFieldName = "level"
	}

	result := logr.New(sink)
	if opts.Name != "" {
		result = result.WithName(opts.Name)
	}
	return result
}
// getOutputFormat builds a colored console writer on stdout with custom
// timestamp, level and message formatting for human-readable output.
func getOutputFormat() zerolog.ConsoleWriter {
	output := zerolog.ConsoleWriter{Out: os.Stdout, NoColor: false}
	output.FormatTimestamp = func(i interface{}) string {
		return "[" + i.(string) + "]"
	}
	output.FormatLevel = func(i interface{}) string {
		return strings.ToUpper(fmt.Sprintf("[%-3s]", i))
	}
	output.FormatMessage = func(i interface{}) string {
		// NOTE(review): the fixed call depth of 10 assumes a specific
		// zerolog/zerologr call stack; confirm it still resolves to the
		// original caller after dependency upgrades.
		_, file, line, _ := runtime.Caller(10)
		return fmt.Sprintf("[%s:%d] => %s", filepath.Base(file), line, i)
	}
	return output
}

View File

@@ -0,0 +1,144 @@
package logger
import (
"bytes"
"sync/atomic"
"testing"
"github.com/stretchr/testify/assert"
)
// TestDefaultVerbosityLevel checks logger output at the default global
// verbosity (V=0): Info logs are emitted and V(1) logs are suppressed.
// Note: SetGlobalOptions mutates process-global zerolog state, so the
// setting made in the first subtest applies to the remaining subtests too.
func TestDefaultVerbosityLevel(t *testing.T) {
	t.Run("info", func(t *testing.T) {
		SetGlobalOptions(GlobalConfig{V: 0})
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.Info("info")
		assert.Equal(t, `{"level":"info","v":0,"time":"NOTIME","message":"info"}`+"\n", out.String())
	})
	// key/value pairs passed to Info must appear as JSON fields
	t.Run("info-add-fields", func(t *testing.T) {
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.Info("", "test_field", 123)
		assert.Equal(t, `{"level":"info","v":0,"test_field":123,"time":"NOTIME"}`+"\n", out.String())
	})
	// an empty message still produces a log line, just without "message"
	t.Run("empty", func(t *testing.T) {
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.Info("")
		assert.Equal(t, `{"level":"info","v":0,"time":"NOTIME"}`+"\n", out.String())
	})
	// V(1) is above the global verbosity, so nothing must be written
	t.Run("disabled", func(t *testing.T) {
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.V(1).Info("You should not see this")
		assert.Equal(t, ``, out.String())
	})
}
// TestVerbosityLevel2 checks logger output with the global verbosity raised
// to 2: Info logs behave as before, and V(1) logs are emitted at debug level.
// Note: SetGlobalOptions mutates process-global zerolog state, so the
// setting made in the first subtest applies to the remaining subtests too.
func TestVerbosityLevel2(t *testing.T) {
	t.Run("info", func(t *testing.T) {
		SetGlobalOptions(GlobalConfig{V: 2})
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.Info("info")
		assert.Equal(t, `{"level":"info","v":0,"time":"NOTIME","message":"info"}`+"\n", out.String())
	})
	t.Run("info-add-fields", func(t *testing.T) {
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.Info("", "test_field", 123)
		assert.Equal(t, `{"level":"info","v":0,"test_field":123,"time":"NOTIME"}`+"\n", out.String())
	})
	t.Run("empty", func(t *testing.T) {
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.Info("")
		assert.Equal(t, `{"level":"info","v":0,"time":"NOTIME"}`+"\n", out.String())
	})
	// despite the name, this subtest asserts the V(1) log IS written at V=2
	t.Run("disabled", func(t *testing.T) {
		out := &bytes.Buffer{}
		log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: out})
		log.V(1).Info("")
		assert.Equal(t, `{"level":"debug","v":1,"time":"NOTIME"}`+"\n", out.String())
	})
}
// blackholeStream is an io.Writer that discards everything written to it
// while counting Write calls; used as the sink for the benchmarks below.
type blackholeStream struct {
	writeCount uint64
}

// WriteCount returns the number of Write calls observed so far.
func (s *blackholeStream) WriteCount() uint64 {
	return atomic.LoadUint64(&s.writeCount)
}

// Write discards p and atomically increments the call counter.
func (s *blackholeStream) Write(p []byte) (int, error) {
	atomic.AddUint64(&s.writeCount, 1)
	return len(p), nil
}
// BenchmarkLoggerLogs measures throughput of plain Info logging across
// parallel goroutines, using a counting discard writer as the sink.
func BenchmarkLoggerLogs(b *testing.B) {
	stream := &blackholeStream{}
	log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: stream})
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			log.Info("The quick brown fox jumps over the lazy dog")
		}
	})

	// every Info call must have produced exactly one write
	if stream.WriteCount() != uint64(b.N) {
		b.Fatalf("Log write count")
	}
}
// BenchmarkLoggerLog measures throughput of a V(1) log that is enabled by
// the global verbosity setting.
// Fixes: the function was named "BenchmarLoggerLog" (missing 'k'), so
// `go test -bench` never discovered or ran it; setup (SetGlobalOptions)
// now happens before ResetTimer so it is excluded from the measurement.
func BenchmarkLoggerLog(b *testing.B) {
	stream := &blackholeStream{}
	log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: stream})
	SetGlobalOptions(GlobalConfig{V: 1})
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			log.V(1).Info("The quick brown fox jumps over the lazy dog")
		}
	})

	if stream.WriteCount() != uint64(b.N) {
		b.Fatalf("Log write count")
	}
}
// BenchmarkLoggerLogWith10Fields measures throughput of an enabled V(1) log
// carrying ten structured key/value fields.
// Fix: SetGlobalOptions is setup and now runs before b.ResetTimer so it is
// not included in the timed region.
func BenchmarkLoggerLogWith10Fields(b *testing.B) {
	stream := &blackholeStream{}
	log := NewWithOptions(Options{TimeFormat: "NOTIME", Output: stream})
	SetGlobalOptions(GlobalConfig{V: 1})
	b.ResetTimer()

	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			log.V(1).Info("The quick brown fox jumps over the lazy dog",
				"test1", 1,
				"test2", 2,
				"test3", 3,
				"test4", 4,
				"test5", 5,
				"test6", 6,
				"test7", 7,
				"test8", 8,
				"test9", 9,
				"test10", 10)
		}
	})

	if stream.WriteCount() != uint64(b.N) {
		b.Fatalf("Log write count")
	}
}

92
pkg/sfu/mediaengine.go Normal file
View File

@@ -0,0 +1,92 @@
package sfu
import (
"github.com/pion/sdp/v3"
"github.com/pion/webrtc/v3"
)
// MIME types for the codecs registered on the publisher media engine.
const (
	mimeTypeH264 = "video/h264"
	mimeTypeOpus = "audio/opus"
	mimeTypeVP8  = "video/vp8"
	mimeTypeVP9  = "video/vp9"
)

// frameMarking is the RTP header extension URI for frame-marking metadata.
const frameMarking = "urn:ietf:params:rtp-hdrext:framemarking"
// getPublisherMediaEngine builds the MediaEngine used for publisher peer
// connections: Opus audio plus VP8/VP9/H264 video variants with fixed
// payload types, and the RTP header extensions the SFU consumes.
func getPublisherMediaEngine() (*webrtc.MediaEngine, error) {
	me := &webrtc.MediaEngine{}
	// audio: Opus, stereo, with in-band FEC enabled
	if err := me.RegisterCodec(webrtc.RTPCodecParameters{
		RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeOpus, ClockRate: 48000, Channels: 2, SDPFmtpLine: "minptime=10;useinbandfec=1", RTCPFeedback: nil},
		PayloadType:        111,
	}, webrtc.RTPCodecTypeAudio); err != nil {
		return nil, err
	}

	// RTCP feedback enabled on every video codec (REMB, FIR, NACK, PLI)
	videoRTCPFeedback := []webrtc.RTCPFeedback{{"goog-remb", ""}, {"ccm", "fir"}, {"nack", ""}, {"nack", "pli"}}
	for _, codec := range []webrtc.RTPCodecParameters{
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeVP8, ClockRate: 90000, RTCPFeedback: videoRTCPFeedback},
			PayloadType:        96,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeVP9, ClockRate: 90000, SDPFmtpLine: "profile-id=0", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        98,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeVP9, ClockRate: 90000, SDPFmtpLine: "profile-id=1", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        100,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeH264, ClockRate: 90000, SDPFmtpLine: "level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42001f", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        102,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeH264, ClockRate: 90000, SDPFmtpLine: "level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42001f", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        127,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeH264, ClockRate: 90000, SDPFmtpLine: "level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=42e01f", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        125,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeH264, ClockRate: 90000, SDPFmtpLine: "level-asymmetry-allowed=1;packetization-mode=0;profile-level-id=42e01f", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        108,
		},
		{
			RTPCodecCapability: webrtc.RTPCodecCapability{MimeType: mimeTypeH264, ClockRate: 90000, SDPFmtpLine: "level-asymmetry-allowed=1;packetization-mode=1;profile-level-id=640032", RTCPFeedback: videoRTCPFeedback},
			PayloadType:        123,
		},
	} {
		if err := me.RegisterCodec(codec, webrtc.RTPCodecTypeVideo); err != nil {
			return nil, err
		}
	}

	// header extensions for video: MID, RID, transport-wide CC, frame marking
	for _, extension := range []string{
		sdp.SDESMidURI,
		sdp.SDESRTPStreamIDURI,
		sdp.TransportCCURI,
		frameMarking,
	} {
		if err := me.RegisterHeaderExtension(webrtc.RTPHeaderExtensionCapability{URI: extension}, webrtc.RTPCodecTypeVideo); err != nil {
			return nil, err
		}
	}

	// header extensions for audio: MID, RID, audio level
	for _, extension := range []string{
		sdp.SDESMidURI,
		sdp.SDESRTPStreamIDURI,
		sdp.AudioLevelURI,
	} {
		if err := me.RegisterHeaderExtension(webrtc.RTPHeaderExtensionCapability{URI: extension}, webrtc.RTPCodecTypeAudio); err != nil {
			return nil, err
		}
	}

	return me, nil
}
// getSubscriberMediaEngine returns a MediaEngine for subscriber peer
// connections with no codecs or extensions pre-registered.
func getSubscriberMediaEngine() (*webrtc.MediaEngine, error) {
	return &webrtc.MediaEngine{}, nil
}

311
pkg/sfu/peer.go Normal file
View File

@@ -0,0 +1,311 @@
package sfu
import (
"errors"
"fmt"
"sync"
"github.com/lucsky/cuid"
"github.com/pion/webrtc/v3"
)
// Targets identifying which of a peer's two transports a signaling message
// (e.g. an ICE candidate) refers to.
const (
	publisher  = 0
	subscriber = 1
)
var (
	// ErrTransportExists join is called after a peerconnection is established
	ErrTransportExists = errors.New("rtc transport already exists for this connection")
	// ErrNoTransportEstablished cannot signal before join
	ErrNoTransportEstablished = errors.New("no rtc transport exists for this Peer")
	// ErrOfferIgnored if offer received in unstable state.
	// Fix: message previously read "offered ignored".
	ErrOfferIgnored = errors.New("offer ignored")
)
// Peer is the signaling-facing abstraction of a participant: a pair of
// transports (publisher/subscriber) attached to a session.
type Peer interface {
	// ID returns the peer id.
	ID() string
	// Session returns the session this peer belongs to.
	Session() Session
	// Publisher returns the publisher-side transport (nil when joined with NoPublish).
	Publisher() *Publisher
	// Subscriber returns the subscriber-side transport (nil when joined with NoSubscribe).
	Subscriber() *Subscriber
	// Close tears down both transports and removes the peer from its session.
	Close() error
	// SendDCMessage sends msg as text on the subscriber data channel with the given label.
	SendDCMessage(label string, msg []byte) error
}
// JoinConfig allow adding more control to the peers joining a SessionLocal.
type JoinConfig struct {
	// If true the peer will not be allowed to publish tracks to SessionLocal.
	NoPublish bool
	// If true the peer will not be allowed to subscribe to other peers in SessionLocal.
	NoSubscribe bool
	// If true the peer will not automatically subscribe all tracks,
	// and then the peer can use peer.Subscriber().AddDownTrack/RemoveDownTrack
	// to customize the subscribe stream combination as needed.
	// this parameter depends on NoSubscribe=false.
	NoAutoSubscribe bool
}
// SessionProvider provides the SessionLocal to the sfu.Peer
// This allows the sfu.SFU{} implementation to be customized / wrapped by another package
type SessionProvider interface {
	// GetSession returns (creating if needed) the session for sid along with
	// the transport configuration to use for its peer connections.
	GetSession(sid string) (Session, WebRTCTransportConfig)
}
// ChannelAPIMessage is the JSON envelope exchanged over the API data channel:
// a method name plus optional parameters.
type ChannelAPIMessage struct {
	Method string      `json:"method"`
	Params interface{} `json:"params,omitempty"`
}
// PeerLocal represents a pair peer connection
type PeerLocal struct {
	sync.Mutex
	id       string
	closed   atomicBool
	session  Session
	provider SessionProvider

	publisher  *Publisher
	subscriber *Subscriber

	// OnOffer is invoked when a subscriber offer must be sent to the remote.
	OnOffer func(*webrtc.SessionDescription)
	// OnIceCandidate is invoked with a candidate and the target transport
	// (publisher or subscriber constant).
	OnIceCandidate func(*webrtc.ICECandidateInit, int)
	// OnICEConnectionStateChange is invoked on publisher ICE state changes.
	OnICEConnectionStateChange func(webrtc.ICEConnectionState)

	// remoteAnswerPending is true while an offer is outstanding; guarded by
	// the embedded Mutex.
	remoteAnswerPending bool
	// negotiationPending defers renegotiation requested while an answer is pending.
	negotiationPending bool
}
// NewPeer creates a new PeerLocal for signaling with the given SFU
func NewPeer(provider SessionProvider) *PeerLocal {
	peer := &PeerLocal{}
	peer.provider = provider
	return peer
}
// Join initializes this peer for a given sessionID.
// The optional config controls whether publisher/subscriber transports are
// created and whether tracks are auto-subscribed. An empty uid is replaced
// with a generated cuid. Returns ErrTransportExists if already joined.
func (p *PeerLocal) Join(sid, uid string, config ...JoinConfig) error {
	var conf JoinConfig
	if len(config) > 0 {
		conf = config[0]
	}

	if p.session != nil {
		Logger.V(1).Info("peer already exists", "session_id", sid, "peer_id", p.id, "publisher_id", p.publisher.id)
		return ErrTransportExists
	}

	if uid == "" {
		uid = cuid.New()
	}
	p.id = uid
	var err error

	s, cfg := p.provider.GetSession(sid)
	p.session = s

	if !conf.NoSubscribe {
		p.subscriber, err = NewSubscriber(uid, cfg)
		if err != nil {
			return fmt.Errorf("error creating transport: %v", err)
		}

		p.subscriber.noAutoSubscribe = conf.NoAutoSubscribe

		p.subscriber.OnNegotiationNeeded(func() {
			p.Lock()
			defer p.Unlock()

			// defer the new offer while we are still waiting for an answer
			if p.remoteAnswerPending {
				p.negotiationPending = true
				return
			}

			Logger.V(1).Info("Negotiation needed", "peer_id", p.id)
			offer, err := p.subscriber.CreateOffer()
			if err != nil {
				Logger.Error(err, "CreateOffer error")
				return
			}

			p.remoteAnswerPending = true
			if p.OnOffer != nil && !p.closed.get() {
				Logger.V(0).Info("Send offer", "peer_id", p.id)
				p.OnOffer(&offer)
			}
		})

		p.subscriber.OnICECandidate(func(c *webrtc.ICECandidate) {
			Logger.V(1).Info("On subscriber ice candidate called for peer", "peer_id", p.id)
			// nil marks the end of candidate gathering
			if c == nil {
				return
			}

			if p.OnIceCandidate != nil && !p.closed.get() {
				json := c.ToJSON()
				p.OnIceCandidate(&json, subscriber)
			}
		})
	}

	if !conf.NoPublish {
		p.publisher, err = NewPublisher(uid, p.session, &cfg)
		if err != nil {
			return fmt.Errorf("error creating transport: %v", err)
		}
		if !conf.NoSubscribe {
			// wire the session's datachannel middlewares onto the subscriber
			for _, dc := range p.session.GetDCMiddlewares() {
				if err := p.subscriber.AddDatachannel(p, dc); err != nil {
					return fmt.Errorf("setting subscriber default dc datachannel: %w", err)
				}
			}
		}

		p.publisher.OnICECandidate(func(c *webrtc.ICECandidate) {
			Logger.V(1).Info("on publisher ice candidate called for peer", "peer_id", p.id)
			// nil marks the end of candidate gathering
			if c == nil {
				return
			}

			if p.OnIceCandidate != nil && !p.closed.get() {
				json := c.ToJSON()
				p.OnIceCandidate(&json, publisher)
			}
		})

		p.publisher.OnICEConnectionStateChange(func(s webrtc.ICEConnectionState) {
			if p.OnICEConnectionStateChange != nil && !p.closed.get() {
				p.OnICEConnectionStateChange(s)
			}
		})
	}

	p.session.AddPeer(p)
	Logger.V(0).Info("PeerLocal join SessionLocal", "peer_id", p.id, "session_id", sid)

	if !conf.NoSubscribe {
		p.session.Subscribe(p)
	}
	return nil
}
// Answer an offer from remote
func (p *PeerLocal) Answer(sdp webrtc.SessionDescription) (*webrtc.SessionDescription, error) {
	pub := p.publisher
	if pub == nil {
		return nil, ErrNoTransportEstablished
	}

	Logger.V(0).Info("PeerLocal got offer", "peer_id", p.id)
	// only negotiate while the publisher connection is in a stable state
	if pub.SignalingState() != webrtc.SignalingStateStable {
		return nil, ErrOfferIgnored
	}

	answer, err := pub.Answer(sdp)
	if err != nil {
		return nil, fmt.Errorf("error creating answer: %v", err)
	}

	Logger.V(0).Info("PeerLocal send answer", "peer_id", p.id)
	return &answer, nil
}
// SetRemoteDescription when receiving an answer from remote
func (p *PeerLocal) SetRemoteDescription(sdp webrtc.SessionDescription) error {
	sub := p.subscriber
	if sub == nil {
		return ErrNoTransportEstablished
	}
	p.Lock()
	defer p.Unlock()

	Logger.V(0).Info("PeerLocal got answer", "peer_id", p.id)
	if err := sub.SetRemoteDescription(sdp); err != nil {
		return fmt.Errorf("setting remote description: %w", err)
	}

	p.remoteAnswerPending = false
	// fire any renegotiation that was deferred while this answer was pending
	if !p.negotiationPending {
		return nil
	}
	p.negotiationPending = false
	sub.negotiate()
	return nil
}
// Trickle candidates available for this peer
func (p *PeerLocal) Trickle(candidate webrtc.ICECandidateInit, target int) error {
	if p.subscriber == nil || p.publisher == nil {
		return ErrNoTransportEstablished
	}
	Logger.V(0).Info("PeerLocal trickle", "peer_id", p.id)

	// route the candidate to the transport named by target
	var err error
	switch target {
	case publisher:
		err = p.publisher.AddICECandidate(candidate)
	case subscriber:
		err = p.subscriber.AddICECandidate(candidate)
	}
	if err != nil {
		return fmt.Errorf("setting ice candidate: %w", err)
	}
	return nil
}
// SendDCMessage sends msg as a text message on the subscriber data channel
// identified by label.
func (p *PeerLocal) SendDCMessage(label string, msg []byte) error {
	sub := p.subscriber
	if sub == nil {
		return fmt.Errorf("no subscriber for this peer")
	}
	channel := sub.DataChannel(label)
	if channel == nil {
		return fmt.Errorf("data channel %s doesn't exist", label)
	}

	err := channel.SendText(string(msg))
	if err != nil {
		return fmt.Errorf("failed to send message: %v", err)
	}
	return nil
}
// Close shuts down the peer connection and sends true to the done channel
func (p *PeerLocal) Close() error {
	p.Lock()
	defer p.Unlock()

	// nothing to do when the closed flag was already set
	if !p.closed.set(true) {
		return nil
	}

	if s := p.session; s != nil {
		s.RemovePeer(p)
	}
	if pub := p.publisher; pub != nil {
		pub.Close()
	}
	if sub := p.subscriber; sub != nil {
		if err := sub.Close(); err != nil {
			return err
		}
	}
	return nil
}
// Subscriber returns the subscriber-side transport (nil when joined with NoSubscribe).
func (p *PeerLocal) Subscriber() *Subscriber {
	return p.subscriber
}

// Publisher returns the publisher-side transport (nil when joined with NoPublish).
func (p *PeerLocal) Publisher() *Publisher {
	return p.publisher
}

// Session returns the session this peer joined (nil before Join).
func (p *PeerLocal) Session() Session {
	return p.session
}

// ID return the peer id
func (p *PeerLocal) ID() string {
	return p.id
}

352
pkg/sfu/prober.go Normal file
View File

@@ -0,0 +1,352 @@
//
// Design of Prober
//
// Probing is used to check for existence of excess channel capacity.
// This is especially useful in the downstream direction of SFU.
// SFU forwards audio/video streams from one or more publishers to
// all the subscribers. But, the downstream channel of a subscriber
// may not be big enough to carry all the streams. It is also a time
// varying quantity.
//
// When there is not enough capacity, some streams will be paused.
// To resume a stream, SFU would need to know that the channel has
// enough capacity. That's where probing comes in. When conditions
// are favorable, SFU can send probe packets so that the bandwidth
// estimator has more data to estimate available channel capacity
// better.
// NOTE: What defines `favorable conditions` is implementation dependent.
//
// There are two options for probing
// - Use padding only RTP packets: This one is preferable as
// probe rate can be controlled more tightly.
// - Resume a paused stream or forward a higher spatial layer:
// Have to find a stream at probing rate. Also, a stream could
// get a key frame unexpectedly boosting rate in the probing
// window.
//
// The strategy used depends on stream allocator implementation.
// This module can be used if the stream allocator decides to use
// padding only RTP packets for probing purposes.
//
// Implementation:
// There are a couple of options
// - Check prober in the forwarding path (pull from prober).
// This is preferred for scalability reasons. But, this
// suffers from not being able to probe when all streams
// are paused (could be due to downstream bandwidth
// constraints or the corresponding upstream tracks may
// have paused due to upstream bandwidth constraints).
// Another issue is not being able to have tight control on
// probing window boundary as the packet forwarding path
// may not have a packet to forward. But, it should not
// be a major concern as long as some stream(s) is/are
// forwarded as there should be a packet at least every
// 60 ms or so (forwarding only one stream at 15 fps).
// Usually, it will be serviced much more frequently when
// there are multiple streams getting forwarded.
// - Run it in a go routine. But, that would have to wake up
// very often to prevent bunching up of probe
// packets. So, a scalability concern as there is one prober
// per subscriber peer connection. But, probe windows
// should be very short (of the order of 100s of ms).
// So, this approach might be fine.
//
// The implementation here follows the second approach of using a
// go routine.
//
// Pacing:
// ------
// Ideally, the subscriber peer connection should have a pacer which
// trickles data out at the estimated channel capacity rate (and
// estimated channel capacity + probing rate when actively probing).
//
// But, there are a few significant challenges
// 1. Pacer will require buffering of forwarded packets. That means
// more memory, more CPU (have to make copy of packets) and
// more latency in the media stream.
// 2. Scalability concern as SFU may be handling hundreds of
// subscriber peer connections and each one processing the pacing
// loop at 5ms interval will add up.
//
// So, this module assumes that pacing is inherently provided by the
// publishers for media streams. That is a reasonable assumption given
// that publishing clients will run their own pacer and pacing data out
// at a steady rate.
//
// A further assumption is that if there are multiple publishers for
// a subscriber peer connection, all the publishers are not pacing
// in sync, i. e. each publisher's pacer is completely independent
// and SFU will be receiving the media packets with a good spread and
// not clumped together.
//
// Given those assumptions, this module monitors media send rate and
// adjusts probing packet sends accordingly. Although the probing may
// have a high enough wake up frequency, it is for short windows.
// For example, probing at 5 Mbps for 1/2 second and sending 1000 byte
// probe per iteration will wake up every 1.6 ms. That is very high,
// but should last for 1/2 second or so.
// 5 Mbps over 1/2 second = 2.5 Mbps
// 2.5 Mbps = 312500 bytes = 313 probes at 1000 byte probes
// 313 probes over 1/2 second = 1.6 ms between probes
//
// A few things to note
// 1. When a probe cluster is added, the expected media rate is provided.
// So, the wake up interval takes that into account. For example,
// if probing at 5 Mbps for 1/2 second and if 4 Mbps of it is expected
// to be provided by media traffic, the wake up interval becomes 8 ms.
// 2. The amount of probing should actually be capped at some value to
// avoid too much self-induced congestion. It maybe something like 500 kbps.
// That will increase the wake up interval to 16 ms in the above example.
// 3. In practice, the probing interval may also be shorter. Typically,
// it can be run for 2 - 3 RTTs to get a good measurement. For
// the longest hauls, RTT could be 250 ms or so leading to the probing
// window being long(ish). But, RTT should be much shorter especially if
// the subscriber peer connection of the client is able to connect to
// the nearest data center.
//
package sfu
import (
"sync"
"time"
"github.com/gammazero/deque"
)
// Prober paces padding-only probe packets, driven by a FIFO of Clusters,
// to help the bandwidth estimator discover spare channel capacity.
type Prober struct {
	clustersMu    sync.RWMutex
	clusters      deque.Deque
	activeCluster *Cluster

	// onSendProbe is called from the probe loop to transmit bytes.
	// NOTE(review): written without holding clustersMu — assumed to be set
	// once before probing starts; confirm with callers.
	onSendProbe func(bytesToSend int) int
}
// NewProber returns an idle Prober; the probe loop starts when the first
// cluster is added.
func NewProber() *Prober {
	p := &Prober{}
	p.clusters.SetMinCapacity(2)
	return p
}
// IsRunning reports whether any probe clusters are queued (i.e. the probe
// loop is active or about to be).
func (p *Prober) IsRunning() bool {
	p.clustersMu.RLock()
	defer p.clustersMu.RUnlock()

	return p.clusters.Len() > 0
}
// Reset discards all queued probe clusters and clears the active cluster.
// LK-TODO - log if active cluster is getting reset, maybe log state of all clusters
func (p *Prober) Reset() {
	p.clustersMu.Lock()
	defer p.clustersMu.Unlock()

	p.clusters.Clear()
	// fix: previously the active cluster was left set, so getFrontCluster
	// kept returning the stale cluster and probing continued after a reset
	p.activeCluster = nil
}
// OnSendProbe registers the callback used to transmit probe padding; it is
// given the number of bytes to send and returns the bytes actually sent.
// NOTE(review): stored without synchronization — assumed to be registered
// before probing starts; confirm with callers.
func (p *Prober) OnSendProbe(f func(bytesToSend int) int) {
	p.onSendProbe = f
}
// AddCluster queues a probe cluster targeting desiredRateBps for at least
// minDuration (giving up after maxDuration), where expectedRateBps is the
// rate expected from regular media traffic. Non-positive desired rates are
// ignored. Starts the probe loop if it is not already running.
func (p *Prober) AddCluster(desiredRateBps int, expectedRateBps int, minDuration time.Duration, maxDuration time.Duration) {
	if desiredRateBps <= 0 {
		return
	}

	cluster := NewCluster(desiredRateBps, expectedRateBps, minDuration, maxDuration)
	// LK-TODO - log information about added cluster
	p.pushBackClusterAndMaybeStart(cluster)
}
// PacketSent records size bytes of regular (non-probe) media sent, so the
// active cluster can subtract media traffic from its probe budget.
func (p *Prober) PacketSent(size int) {
	cluster := p.getFrontCluster()
	if cluster == nil {
		return
	}

	cluster.PacketSent(size)
}
// getFrontCluster returns the currently active cluster, activating (and
// starting) the cluster at the front of the queue when none is active.
// Returns nil when the queue is empty.
// Fix: this method mutates p.activeCluster, so it must take the write lock;
// it previously held only RLock, letting concurrent readers race on the
// activeCluster write and double-Start a cluster.
func (p *Prober) getFrontCluster() *Cluster {
	p.clustersMu.Lock()
	defer p.clustersMu.Unlock()

	if p.activeCluster != nil {
		return p.activeCluster
	}

	if p.clusters.Len() == 0 {
		p.activeCluster = nil
	} else {
		p.activeCluster = p.clusters.Front().(*Cluster)
		p.activeCluster.Start()
	}
	return p.activeCluster
}
// popFrontCluster removes cluster from the front of the queue (if it is
// still the front) and clears the active-cluster cache when it pointed at
// the removed cluster.
func (p *Prober) popFrontCluster(cluster *Cluster) {
	p.clustersMu.Lock()
	defer p.clustersMu.Unlock()

	if p.clusters.Len() == 0 {
		p.activeCluster = nil
		return
	}

	// only pop if the given cluster is still at the front; it may have
	// been removed by another path already
	if p.clusters.Front().(*Cluster) == cluster {
		p.clusters.PopFront()
	}

	if cluster == p.activeCluster {
		p.activeCluster = nil
	}
}
// pushBackClusterAndMaybeStart enqueues cluster and, when the queue goes
// from empty to non-empty, launches the probe loop goroutine (run exits on
// its own once the queue drains).
func (p *Prober) pushBackClusterAndMaybeStart(cluster *Cluster) {
	p.clustersMu.Lock()
	defer p.clustersMu.Unlock()

	p.clusters.PushBack(cluster)

	if p.clusters.Len() == 1 {
		go p.run()
	}
}
// run is the probe loop: it services the front cluster, sleeping its pacing
// interval between iterations, and pops clusters as they finish. The loop
// exits when no clusters remain; pushBackClusterAndMaybeStart spawns a new
// goroutine when the queue becomes non-empty again.
func (p *Prober) run() {
	for {
		// determine how long to sleep
		cluster := p.getFrontCluster()
		if cluster == nil {
			return
		}

		time.Sleep(cluster.GetSleepDuration())

		// wake up and check for probes to send
		cluster = p.getFrontCluster()
		if cluster == nil {
			return
		}

		// Process returns false when the cluster is done; move to the next
		if !cluster.Process(p) {
			p.popFrontCluster(cluster)
			continue
		}
	}
}
// Cluster is a single probing window: a byte budget to be sent over a
// bounded time span, tracking probe and non-probe bytes separately.
type Cluster struct {
	// LK-TODO-START
	// Check if we can operate at cluster level without a lock.
	// The quantities that are updated in a different thread are
	//   bytesSentNonProbe - maybe make this an atomic value
	// Lock contention time should be very minimal though.
	// LK-TODO-END
	lock sync.RWMutex

	// probe budget for the window and its time bounds
	desiredBytes int
	minDuration  time.Duration
	maxDuration  time.Duration
	// pacing interval between probe iterations
	sleepDuration time.Duration

	// bytes sent so far, split into probe padding and regular media
	bytesSentProbe    int
	bytesSentNonProbe int

	// zero until Start; set when the cluster becomes active
	startTime time.Time
}
// NewCluster creates a probe cluster aiming to send desiredRateBps for
// minDuration (and giving up at maxDuration), assuming expectedRateBps of
// that will be carried by regular media traffic.
func NewCluster(desiredRateBps int, expectedRateBps int, minDuration time.Duration, maxDuration time.Duration) *Cluster {
	minDurationMs := minDuration.Milliseconds()
	desiredBytes := int((int64(desiredRateBps)*minDurationMs/time.Second.Milliseconds() + 7) / 8)
	expectedBytes := int((int64(expectedRateBps)*minDurationMs/time.Second.Milliseconds() + 7) / 8)
	// pace based on sending approximately 1000 bytes per probe
	numProbes := (desiredBytes - expectedBytes + 999) / 1000
	if numProbes < 1 {
		// guard against a zero/negative probe count (expected media rate at
		// or above the desired rate), which previously divided by zero below
		numProbes = 1
	}
	sleepDurationMicroSeconds := int(float64(minDurationMs*1000)/float64(numProbes) + 0.5)
	c := &Cluster{
		desiredBytes:  desiredBytes,
		minDuration:   minDuration,
		maxDuration:   maxDuration,
		sleepDuration: time.Duration(sleepDurationMicroSeconds) * time.Microsecond,
	}
	return c
}
// Start marks the beginning of the cluster's probing window; subsequent
// calls are no-ops so the original start time is preserved.
func (c *Cluster) Start() {
	c.lock.Lock()
	defer c.lock.Unlock()

	if c.startTime.IsZero() {
		c.startTime = time.Now()
	}
}
// GetSleepDuration returns the pacing interval between probe iterations.
func (c *Cluster) GetSleepDuration() time.Duration {
	c.lock.RLock()
	defer c.lock.RUnlock()

	return c.sleepDuration
}
// PacketSent records size bytes of regular media sent during the window,
// which counts against the probe budget.
func (c *Cluster) PacketSent(size int) {
	c.lock.Lock()
	defer c.lock.Unlock()

	c.bytesSentNonProbe += size
}
// Process computes how many probe bytes are overdue for this cluster and
// asks the Prober to send them. It returns false when the cluster is done —
// either the budget was met after minDuration, or maxDuration has elapsed.
func (c *Cluster) Process(p *Prober) bool {
	c.lock.RLock()

	// if already past deadline, end the cluster
	timeElapsed := time.Since(c.startTime)
	if timeElapsed > c.maxDuration {
		// LK-TODO log information about short fall in probing
		c.lock.RUnlock()
		return false
	}

	// Calculate number of probe bytes that should have been sent since start.
	// Overall goal is to send desired number of probe bytes in minDuration.
	// However it is possible that timeElapsed is more than minDuration due
	// to scheduling variance. When overshooting time budget, use a capped
	// short fall if there is a grace period given.
	windowDone := float64(timeElapsed) / float64(c.minDuration)
	if windowDone > 1.0 {
		// cluster has been running for longer than minDuration
		windowDone = 1.0
	}

	bytesShouldHaveBeenSent := int(windowDone * float64(c.desiredBytes))
	bytesShortFall := bytesShouldHaveBeenSent - c.bytesSentProbe - c.bytesSentNonProbe
	if bytesShortFall < 0 {
		bytesShortFall = 0
	}
	// cap short fall to limit to 8 packets in an iteration
	// 275 bytes per packet (255 max RTP padding payload + 20 bytes RTP header)
	if bytesShortFall > (275 * 8) {
		bytesShortFall = 275 * 8
	}
	// round up to packet size
	bytesShortFall = ((bytesShortFall + 274) / 275) * 275
	c.lock.RUnlock()

	// send outside the lock; the callback reports bytes actually sent
	bytesSent := 0
	if bytesShortFall > 0 && p.onSendProbe != nil {
		bytesSent = p.onSendProbe(bytesShortFall)
	}

	c.lock.Lock()
	c.bytesSentProbe += bytesSent

	// do not end cluster until minDuration elapses even if rate is achieved.
	// Ensures that the next cluster (if any) does not start early.
	if (c.bytesSentProbe+c.bytesSentNonProbe) >= c.desiredBytes && timeElapsed >= c.minDuration {
		// LK-TODO - log data about how much time the probe finished compared to min/max
		c.lock.Unlock()
		return false
	}

	// LK-TODO look at adapting sleep time based on how many bytes and how much time is left
	c.lock.Unlock()
	return true
}

448
pkg/sfu/publisher.go Normal file
View File

@@ -0,0 +1,448 @@
package sfu
import (
"fmt"
"io"
"sync"
"sync/atomic"
"time"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/relay"
"github.com/pion/rtcp"
"github.com/pion/transport/packetio"
"github.com/pion/webrtc/v3"
)
// Publisher wraps the publisher-side peer connection: it receives remote
// tracks from a client and routes them into the session and any relays.
type Publisher struct {
	mu      sync.RWMutex
	id      string
	pc      *webrtc.PeerConnection
	cfg     *WebRTCTransportConfig
	router  Router
	session Session
	// tracks published on this connection; guarded by mu
	tracks []PublisherTrack

	relayed    atomicBool
	relayPeers []*relayPeer
	candidates []webrtc.ICECandidateInit

	onICEConnectionStateChangeHandler atomic.Value // func(webrtc.ICEConnectionState)
	onPublisherTrack                  atomic.Value // func(PublisherTrack)

	closeOnce sync.Once
}
// relayPeer pairs a relay connection with its data channels and the relay
// options it was created with.
type relayPeer struct {
	peer *relay.Peer
	dcs  []*webrtc.DataChannel
	// include sender reports on the relayed tracks
	withSRReports bool
	// fan session data channels out over the relay as well
	relayFanOutDataChannels bool
}
// PublisherTrack associates a remote track with the Receiver that forwards it.
type PublisherTrack struct {
	Track    *webrtc.TrackRemote
	Receiver Receiver

	// This will be used in the future for tracks that will be relayed as clients or servers
	// This is for SVC and Simulcast where you will be able to choose if the relayed peer just
	// want a single track (for recording/ processing) or get all the tracks (for load balancing)
	clientRelay bool
}
// NewPublisher creates a new Publisher.
//
// It builds a publisher-specific media engine and peer connection, wires the
// OnTrack / OnDataChannel / ICE-state callbacks, and connects the router's
// RTCP writer to the peer connection. Returns errPeerConnectionInitFailed on
// any setup failure.
func NewPublisher(id string, session Session, cfg *WebRTCTransportConfig) (*Publisher, error) {
	me, err := getPublisherMediaEngine()
	if err != nil {
		Logger.Error(err, "NewPeer error", "peer_id", id)
		return nil, errPeerConnectionInitFailed
	}
	api := webrtc.NewAPI(webrtc.WithMediaEngine(me), webrtc.WithSettingEngine(cfg.Setting))
	pc, err := api.NewPeerConnection(cfg.Configuration)
	if err != nil {
		Logger.Error(err, "NewPeer error", "peer_id", id)
		return nil, errPeerConnectionInitFailed
	}
	p := &Publisher{
		id:      id,
		pc:      pc,
		cfg:     cfg,
		router:  newRouter(id, session, cfg),
		session: session,
	}
	pc.OnTrack(func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver) {
		Logger.V(1).Info("Peer got remote track id",
			"peer_id", p.id,
			"track_id", track.ID(),
			"mediaSSRC", track.SSRC(),
			"rid", track.RID(),
			"stream_id", track.StreamID(),
		)
		// AddReceiver reports pub=true only for the first (publishing) layer
		// of a track; subsequent simulcast layers are recorded without
		// re-publishing.
		r, pub := p.router.AddReceiver(receiver, track, track.ID(), track.StreamID())
		if pub {
			p.session.Publish(p.router, r)
			p.mu.Lock()
			publisherTrack := PublisherTrack{track, r, true}
			p.tracks = append(p.tracks, publisherTrack)
			// Mirror the new track to every relay peer established so far.
			// NOTE(review): this assigns the outer `err` captured from
			// NewPublisher's scope — confirm this closure-capture is intended.
			for _, rp := range p.relayPeers {
				if err = p.createRelayTrack(track, r, rp.peer); err != nil {
					Logger.V(1).Error(err, "Creating relay track.", "peer_id", p.id)
				}
			}
			p.mu.Unlock()
			if handler, ok := p.onPublisherTrack.Load().(func(PublisherTrack)); ok && handler != nil {
				handler(publisherTrack)
			}
		} else {
			p.mu.Lock()
			p.tracks = append(p.tracks, PublisherTrack{track, r, false})
			p.mu.Unlock()
		}
	})
	pc.OnDataChannel(func(dc *webrtc.DataChannel) {
		if dc.Label() == APIChannelLabel {
			// terminate api data channel
			return
		}
		p.session.AddDatachannel(id, dc)
	})
	pc.OnICEConnectionStateChange(func(connectionState webrtc.ICEConnectionState) {
		Logger.V(1).Info("ice connection status", "state", connectionState)
		switch connectionState {
		case webrtc.ICEConnectionStateFailed:
			fallthrough
		case webrtc.ICEConnectionStateClosed:
			// Tear down the publisher when ICE terminates.
			Logger.V(1).Info("webrtc ice closed", "peer_id", p.id)
			p.Close()
		}
		if handler, ok := p.onICEConnectionStateChangeHandler.Load().(func(webrtc.ICEConnectionState)); ok && handler != nil {
			handler(connectionState)
		}
	})
	// Router sends its RTCP (PLI/NACK/...) back through this peer connection.
	p.router.SetRTCPWriter(p.pc.WriteRTCP)
	return p, nil
}
// Answer applies the remote offer, flushes any ICE candidates that were
// buffered before the remote description existed, and returns the local
// answer after setting it on the peer connection.
func (p *Publisher) Answer(offer webrtc.SessionDescription) (webrtc.SessionDescription, error) {
	if err := p.pc.SetRemoteDescription(offer); err != nil {
		return webrtc.SessionDescription{}, err
	}
	// Drain candidates buffered by AddICECandidate before the offer arrived.
	for _, candidate := range p.candidates {
		if err := p.pc.AddICECandidate(candidate); err != nil {
			Logger.Error(err, "Add publisher ice candidate to peer err", "peer_id", p.id)
		}
	}
	p.candidates = nil
	answer, err := p.pc.CreateAnswer(nil)
	if err != nil {
		return webrtc.SessionDescription{}, err
	}
	err = p.pc.SetLocalDescription(answer)
	if err != nil {
		return webrtc.SessionDescription{}, err
	}
	return answer, nil
}
// GetRouter returns the Router that forwards this publisher's media.
func (p *Publisher) GetRouter() Router {
	r := p.router
	return r
}
// Close peer.
//
// Shuts down all relay peers, stops the router and closes the underlying
// peer connection. Safe to call multiple times; only the first call acts
// (also invoked from the ICE failed/closed callback).
func (p *Publisher) Close() {
	p.closeOnce.Do(func() {
		// NOTE(review): len(p.relayPeers) is read before taking mu —
		// confirm this unlocked length check is a benign race.
		if len(p.relayPeers) > 0 {
			p.mu.Lock()
			for _, rp := range p.relayPeers {
				if err := rp.peer.Close(); err != nil {
					Logger.Error(err, "Closing relay peer transport.")
				}
			}
			p.mu.Unlock()
		}
		// Stop routing before tearing down the transport.
		p.router.Stop()
		if err := p.pc.Close(); err != nil {
			Logger.Error(err, "webrtc transport close err")
		}
	})
}
// OnPublisherTrack registers f to be invoked for every newly published track.
func (p *Publisher) OnPublisherTrack(f func(track PublisherTrack)) {
	p.onPublisherTrack.Store(f)
}
// OnICECandidate registers f as the local ICE candidate handler, delegating
// directly to the underlying peer connection.
func (p *Publisher) OnICECandidate(f func(c *webrtc.ICECandidate)) {
	p.pc.OnICECandidate(f)
}
// OnICEConnectionStateChange registers f to observe ICE state transitions.
func (p *Publisher) OnICEConnectionStateChange(f func(connectionState webrtc.ICEConnectionState)) {
	p.onICEConnectionStateChangeHandler.Store(f)
}
// SignalingState exposes the signaling state of the peer connection.
func (p *Publisher) SignalingState() webrtc.SignalingState {
	state := p.pc.SignalingState()
	return state
}
// PeerConnection returns the underlying webrtc.PeerConnection.
func (p *Publisher) PeerConnection() *webrtc.PeerConnection {
	pc := p.pc
	return pc
}
// Relay will relay all current and future tracks from current Publisher.
//
// signalFn transports the relay offer to the remote side and returns its
// answer. Options (RelayWithFanOutDataChannels, RelayWithSenderReports)
// configure the resulting relay peer.
func (p *Publisher) Relay(signalFn func(meta relay.PeerMeta, signal []byte) ([]byte, error),
	options ...func(r *relayPeer)) (*relay.Peer, error) {
	lrp := &relayPeer{}
	for _, o := range options {
		o(lrp)
	}
	rp, err := relay.NewPeer(relay.PeerMeta{
		PeerID:    p.id,
		SessionID: p.session.ID(),
	}, &relay.PeerConfig{
		SettingEngine: p.cfg.Setting,
		ICEServers:    p.cfg.Configuration.ICEServers,
		Logger:        Logger,
	})
	if err != nil {
		return nil, fmt.Errorf("relay: %w", err)
	}
	lrp.peer = rp
	rp.OnReady(func() {
		peer := p.session.GetPeer(p.id)
		p.relayed.set(true)
		if lrp.relayFanOutDataChannels {
			// Mirror every fan-out data channel onto the relay and forward
			// inbound messages to this peer's subscriber channel.
			for _, lbl := range p.session.GetFanOutDataChannelLabels() {
				lbl := lbl // per-iteration copy for the closure below
				dc, err := rp.CreateDataChannel(lbl)
				if err != nil {
					Logger.V(1).Error(err, "Creating data channels.", "peer_id", p.id)
					// BUGFIX: dc is nil when CreateDataChannel fails; skip it
					// instead of dereferencing nil below.
					continue
				}
				dc.OnMessage(func(msg webrtc.DataChannelMessage) {
					if peer == nil || peer.Subscriber() == nil {
						return
					}
					if sdc := peer.Subscriber().DataChannel(lbl); sdc != nil {
						if msg.IsString {
							if err := sdc.SendText(string(msg.Data)); err != nil {
								Logger.Error(err, "Sending dc message err")
							}
						} else {
							if err := sdc.Send(msg.Data); err != nil {
								Logger.Error(err, "Sending dc message err")
							}
						}
					}
				})
			}
		}
		// Relay every client track published so far.
		p.mu.Lock()
		for _, tp := range p.tracks {
			if !tp.clientRelay {
				// simulcast will just relay client track for now
				continue
			}
			// Use a locally-scoped error so concurrent callbacks do not
			// race on the captured outer err.
			if cerr := p.createRelayTrack(tp.Track, tp.Receiver, rp); cerr != nil {
				Logger.V(1).Error(cerr, "Creating relay track.", "peer_id", p.id)
			}
		}
		p.relayPeers = append(p.relayPeers, lrp)
		p.mu.Unlock()
		if lrp.withSRReports {
			go p.relayReports(rp)
		}
	})
	rp.OnDataChannel(func(channel *webrtc.DataChannel) {
		if !lrp.relayFanOutDataChannels {
			return
		}
		p.mu.Lock()
		lrp.dcs = append(lrp.dcs, channel)
		p.mu.Unlock()
		p.session.AddDatachannel("", channel)
	})
	if err = rp.Offer(signalFn); err != nil {
		return nil, fmt.Errorf("relay: %w", err)
	}
	return rp, nil
}
// PublisherTracks returns a snapshot copy of all tracks published so far.
func (p *Publisher) PublisherTracks() []PublisherTrack {
	p.mu.Lock()
	defer p.mu.Unlock()
	snapshot := make([]PublisherTrack, len(p.tracks))
	copy(snapshot, p.tracks)
	return snapshot
}
// AddRelayFanOutDataChannel adds fan out data channel to relayed peers.
//
// For each relay peer that does not already have a channel with the given
// label, a new data channel is created and inbound messages are fanned out
// to the session.
func (p *Publisher) AddRelayFanOutDataChannel(label string) {
	p.mu.RLock()
	defer p.mu.RUnlock()
	for _, rp := range p.relayPeers {
		// BUGFIX: the original `continue` inside the inner loop only advanced
		// the inner loop, so duplicate channels were still created. Track the
		// match explicitly and skip this relay peer when one exists.
		exists := false
		for _, dc := range rp.dcs {
			if dc.Label() == label {
				exists = true
				break
			}
		}
		if exists {
			continue
		}
		dc, err := rp.peer.CreateDataChannel(label)
		if err != nil {
			Logger.V(1).Error(err, "Creating data channels.", "peer_id", p.id)
			// BUGFIX: dc is nil on error; skip instead of dereferencing it.
			continue
		}
		dc.OnMessage(func(msg webrtc.DataChannelMessage) {
			p.session.FanOutMessage("", label, msg)
		})
	}
}
// GetRelayedDataChannels returns, for each relayed peer, the data channel
// matching label (at most one per peer).
func (p *Publisher) GetRelayedDataChannels(label string) []*webrtc.DataChannel {
	p.mu.RLock()
	defer p.mu.RUnlock()
	matched := make([]*webrtc.DataChannel, 0, len(p.relayPeers))
	for _, rp := range p.relayPeers {
		for _, channel := range rp.dcs {
			if channel.Label() != label {
				continue
			}
			matched = append(matched, channel)
			break
		}
	}
	return matched
}
// Relayed reports whether this publisher has been relayed at least once.
func (p *Publisher) Relayed() bool {
	isRelayed := p.relayed.get()
	return isRelayed
}
// Tracks returns the remote tracks received on this publisher connection.
func (p *Publisher) Tracks() []*webrtc.TrackRemote {
	p.mu.RLock()
	defer p.mu.RUnlock()
	remote := make([]*webrtc.TrackRemote, 0, len(p.tracks))
	for _, pt := range p.tracks {
		remote = append(remote, pt.Track)
	}
	return remote
}
// AddICECandidate applies candidate immediately when the remote description
// is already known; otherwise it is buffered and flushed in Answer.
func (p *Publisher) AddICECandidate(candidate webrtc.ICECandidateInit) error {
	if p.pc.RemoteDescription() == nil {
		p.candidates = append(p.candidates, candidate)
		return nil
	}
	return p.pc.AddICECandidate(candidate)
}
// createRelayTrack mirrors one remote track onto a relay peer: it builds a
// DownTrack with NACK/PLI feedback, adds it to the relay, converts inbound
// relay PLIs back into PLIs toward the original publisher, and registers the
// down track on the receiver.
func (p *Publisher) createRelayTrack(track *webrtc.TrackRemote, receiver Receiver, rp *relay.Peer) error {
	codec := track.Codec()
	downTrack, err := NewDownTrack(webrtc.RTPCodecCapability{
		MimeType:     codec.MimeType,
		ClockRate:    codec.ClockRate,
		Channels:     codec.Channels,
		SDPFmtpLine:  codec.SDPFmtpLine,
		RTCPFeedback: []webrtc.RTCPFeedback{{"nack", ""}, {"nack", "pli"}},
	}, receiver, p.cfg.BufferFactory, p.id, p.cfg.Router.MaxPacketTrack)
	if err != nil {
		Logger.V(1).Error(err, "Create Relay downtrack err", "peer_id", p.id)
		return err
	}
	sdr, err := rp.AddTrack(receiver.(*WebRTCReceiver).receiver, track, downTrack)
	if err != nil {
		Logger.V(1).Error(err, "Relaying track.", "peer_id", p.id)
		return fmt.Errorf("relay: %w", err)
	}
	// Listen for RTCP arriving from the relay on the sender's SSRC and
	// translate PLIs so they target the original publisher's SSRC.
	p.cfg.BufferFactory.GetOrNew(packetio.RTCPBufferPacket,
		uint32(sdr.GetParameters().Encodings[0].SSRC)).(*buffer.RTCPReader).OnPacket(func(bytes []byte) {
		pkts, err := rtcp.Unmarshal(bytes)
		if err != nil {
			Logger.V(1).Error(err, "Unmarshal rtcp reports", "peer_id", p.id)
			return
		}
		var rpkts []rtcp.Packet
		for _, pkt := range pkts {
			switch pk := pkt.(type) {
			case *rtcp.PictureLossIndication:
				rpkts = append(rpkts, &rtcp.PictureLossIndication{
					SenderSSRC: pk.MediaSSRC,
					MediaSSRC:  uint32(track.SSRC()),
				})
			}
		}
		if len(rpkts) > 0 {
			if err := p.pc.WriteRTCP(rpkts); err != nil {
				Logger.V(1).Error(err, "Sending rtcp relay reports", "peer_id", p.id)
			}
		}
	})
	downTrack.OnCloseHandler(func() {
		if err = sdr.Stop(); err != nil {
			Logger.V(1).Error(err, "Stopping relay sender.", "peer_id", p.id)
		}
	})
	receiver.AddDownTrack(downTrack, true)
	return nil
}
// relayReports periodically (every 5s) builds RTCP sender reports for all
// bound down tracks on the relay peer and writes them out. It runs until the
// relay connection closes (io.EOF / io.ErrClosedPipe from WriteRTCP).
func (p *Publisher) relayReports(rp *relay.Peer) {
	for {
		time.Sleep(5 * time.Second)
		var r []rtcp.Packet
		for _, t := range rp.LocalTracks() {
			if dt, ok := t.(*DownTrack); ok {
				// Skip tracks that have not finished binding yet.
				if !dt.bound.get() {
					continue
				}
				if sr := dt.CreateSenderReport(); sr != nil {
					r = append(r, sr)
				}
			}
		}
		if len(r) == 0 {
			continue
		}
		if err := rp.WriteRTCP(r); err != nil {
			if err == io.EOF || err == io.ErrClosedPipe {
				// Relay transport is gone; stop reporting.
				return
			}
			Logger.Error(err, "Sending downtrack reports err")
		}
	}
}

708
pkg/sfu/receiver.go Normal file
View File

@@ -0,0 +1,708 @@
package sfu
import (
"io"
"math/rand"
"runtime"
"sync"
"sync/atomic"
"time"
"github.com/gammazero/workerpool"
"github.com/pion/rtcp"
"github.com/pion/rtp"
"github.com/pion/webrtc/v3"
"github.com/rs/zerolog/log"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/stats"
)
// Receiver defines an interface for a track receiver: the ingress side of a
// track that fans packets out to zero or more DownTracks.
type Receiver interface {
	// Identity and codec metadata.
	TrackID() string
	StreamID() string
	Codec() webrtc.RTPCodecParameters
	Kind() webrtc.RTPCodecType
	SSRC(layer int) uint32
	SetTrackMeta(trackID, streamID string)

	// Up (publisher) and down (subscriber) track management.
	AddUpTrack(track *webrtc.TrackRemote, buffer *buffer.Buffer, bestQualityFirst bool)
	AddDownTrack(track *DownTrack, bestQualityFirst bool)
	SetUpTrackPaused(paused bool)

	// Simulcast layer availability and bitrate accounting.
	HasSpatialLayer(layer int32) bool
	GetBitrate() [3]uint64
	GetBitrateTemporal() [3][4]uint64
	GetBitrateTemporalCumulative() [3][4]uint64
	GetMaxTemporalLayer() [3]int32

	// Packet retransmission and lifecycle.
	RetransmitPackets(track *DownTrack, packets []packetMeta) error
	DeleteDownTrack(peerID string)
	OnCloseHandler(fn func())

	// RTCP plumbing and diagnostics.
	SendRTCP(p []rtcp.Packet)
	SetRTCPCh(ch chan []rtcp.Packet)
	GetSenderReportTime(layer int32) (rtpTS uint32, ntpTS uint64)
	DebugInfo() map[string]interface{}
}
const (
	// lostUpdateDelta is an interval in nanoseconds (1e9 ns = 1 s).
	// NOTE(review): not referenced in this portion of the file — confirm
	// usage elsewhere before removing.
	lostUpdateDelta = 1e9
)
// WebRTCReceiver receives a video track
type WebRTCReceiver struct {
	peerID   string
	trackID  string
	streamID string

	kind     webrtc.RTPCodecType
	stream   string
	receiver *webrtc.RTPReceiver
	codec    webrtc.RTPCodecParameters

	stats       [3]*stats.Stream         // per-spatial-layer stats
	nackWorker  *workerpool.WorkerPool   // single worker serializing retransmissions
	isSimulcast bool                     // true when the track has a RID

	availableLayers atomic.Value // []uint16; lock-free reads, writes under upTrackMu

	onCloseHandler func()
	closeOnce      sync.Once
	closed         atomicBool

	trackers    [3]*StreamTracker // per-layer liveness trackers (simulcast only)
	useTrackers bool

	rtcpMu      sync.Mutex
	rtcpCh      chan []rtcp.Packet
	lastPli     atomicInt64 // unix nanos of last forwarded PLI
	pliThrottle int64       // min interval between PLIs, in nanoseconds

	bufferMu sync.RWMutex
	buffers  [3]*buffer.Buffer // per-layer jitter buffers

	upTrackMu sync.RWMutex
	upTracks  [3]*webrtc.TrackRemote

	downTrackMu sync.RWMutex
	downTracks  []*DownTrack
	index       map[string]int   // peerID -> slot in downTracks
	free        map[int]struct{} // reusable nil slots in downTracks

	numProcs    int // parallelism for the load-balanced write path
	lbThreshold int // 0 disables parallel writes

	fracLostMu        sync.Mutex
	maxDownFracLost   uint8
	maxDownFracLostTs time.Time
}
// ReceiverOpts configures a WebRTCReceiver at construction time.
type ReceiverOpts func(w *WebRTCReceiver) *WebRTCReceiver
// WithPliThrottle indicates the minimum time (ms) between sending PLIs.
func WithPliThrottle(period int64) ReceiverOpts {
	return func(w *WebRTCReceiver) *WebRTCReceiver {
		// pliThrottle is stored in nanoseconds.
		periodNs := period * 1e6
		w.pliThrottle = periodNs
		return w
	}
}
// WithStreamTrackers enables StreamTracker use for simulcast layers.
func WithStreamTrackers() ReceiverOpts {
	return func(w *WebRTCReceiver) *WebRTCReceiver {
		w.useTrackers = true
		return w
	}
}
// WithLoadBalanceThreshold enables parallelization of packet writes when the
// number of down tracks exceeds the threshold.
// Value should be between 3 and 150.
// For a server handling a few large rooms, use a smaller value (required to handle very large (250+ participant) rooms).
// For a server handling many small rooms, use a larger value or disable.
// Set to 0 (disabled) by default.
func WithLoadBalanceThreshold(downTracks int) ReceiverOpts {
	return func(w *WebRTCReceiver) *WebRTCReceiver {
		w.lbThreshold = downTracks
		return w
	}
}
// NewWebRTCReceiver creates a new webrtc track receiver.
//
// Defaults: 500 ms PLI throttle, a single-worker NACK pool, and parallelism
// capped at the effective GOMAXPROCS. Options are applied in order.
func NewWebRTCReceiver(receiver *webrtc.RTPReceiver, track *webrtc.TrackRemote, pid string, opts ...ReceiverOpts) Receiver {
	w := &WebRTCReceiver{
		peerID:      pid,
		receiver:    receiver,
		trackID:     track.ID(),
		streamID:    track.StreamID(),
		codec:       track.Codec(),
		kind:        track.Kind(),
		nackWorker:  workerpool.New(1),
		isSimulcast: len(track.RID()) > 0, // a non-empty RID implies simulcast
		pliThrottle: 500e6,                // 500 ms in nanoseconds
		downTracks:  make([]*DownTrack, 0),
		index:       make(map[string]int),
		free:        make(map[int]struct{}),
		numProcs:    runtime.NumCPU(),
	}
	// Clamp parallelism to GOMAXPROCS when it is lower than NumCPU.
	if runtime.GOMAXPROCS(0) < w.numProcs {
		w.numProcs = runtime.GOMAXPROCS(0)
	}
	for _, opt := range opts {
		w = opt(w)
	}
	return w
}
// SetTrackMeta overrides the track and stream IDs reported by this receiver.
func (w *WebRTCReceiver) SetTrackMeta(trackID, streamID string) {
	w.trackID = trackID
	w.streamID = streamID
}
// StreamID returns the stream identifier of the received track.
func (w *WebRTCReceiver) StreamID() string {
	id := w.streamID
	return id
}
// TrackID returns the identifier of the received track.
func (w *WebRTCReceiver) TrackID() string {
	id := w.trackID
	return id
}
// SSRC returns the SSRC of the up track at the given spatial layer, or 0
// when that layer has no track yet.
func (w *WebRTCReceiver) SSRC(layer int) uint32 {
	w.upTrackMu.RLock()
	defer w.upTrackMu.RUnlock()
	track := w.upTracks[layer]
	if track == nil {
		return 0
	}
	return uint32(track.SSRC())
}
// Codec returns the negotiated codec parameters of the track.
func (w *WebRTCReceiver) Codec() webrtc.RTPCodecParameters {
	c := w.codec
	return c
}
// Kind reports whether this receiver carries audio or video.
func (w *WebRTCReceiver) Kind() webrtc.RTPCodecType {
	k := w.kind
	return k
}
// AddUpTrack registers one publisher track (one simulcast layer) with its
// jitter buffer, records the layer as available, optionally starts a
// StreamTracker for liveness, and spawns the RTP forwarding loop.
func (w *WebRTCReceiver) AddUpTrack(track *webrtc.TrackRemote, buff *buffer.Buffer, bestQualityFirst bool) {
	if w.closed.get() {
		return
	}
	// Map the simulcast RID onto a spatial layer index (0 = lowest).
	var layer int32
	switch track.RID() {
	case fullResolution:
		layer = 2
	case halfResolution:
		layer = 1
	default:
		layer = 0
	}
	w.upTrackMu.Lock()
	w.upTracks[layer] = track
	w.upTrackMu.Unlock()
	w.bufferMu.Lock()
	w.buffers[layer] = buff
	w.bufferMu.Unlock()
	if w.isSimulcast {
		w.addAvailableLayer(uint16(layer), false)
		w.downTrackMu.RLock()
		// LK-TODO-START
		// DownTrack layer change should not happen directly from here.
		// Layer switching should be controlled by StreamAllocator. So, this
		// should call into DownTrack to notify availability of a new layer.
		// One challenge to think about is that the layer bitrate is not available
		// for a second after start up as sfu.Buffer reports at that cadence.
		// One possibility is to initialize sfu.Buffer with default bitrate
		// based on layer.
		// LK-TODO-END
		for _, dt := range w.downTracks {
			if dt != nil {
				// Move down tracks toward the preferred quality direction.
				if (bestQualityFirst && layer > dt.CurrentSpatialLayer()) ||
					(!bestQualityFirst && layer < dt.CurrentSpatialLayer()) {
					_ = dt.SwitchSpatialLayer(layer, false)
				}
			}
		}
		w.downTrackMu.RUnlock()
		// always publish lowest layer
		if layer != 0 && w.useTrackers {
			tracker := NewStreamTracker()
			w.trackers[layer] = tracker
			tracker.OnStatusChanged = func(status StreamStatus) {
				if status == StreamStatusStopped {
					w.removeAvailableLayer(uint16(layer))
				} else {
					w.addAvailableLayer(uint16(layer), true)
				}
			}
			tracker.Start()
		}
	}
	// One forwarding goroutine per layer; exits on buffer EOF.
	go w.forwardRTP(layer)
}
// SetUpTrackPaused indicates upstream will not be sending any data.
// This reflects the "muted" status and pauses the stream trackers so paused
// layers are not declared dead and switched off.
func (w *WebRTCReceiver) SetUpTrackPaused(paused bool) {
	if !w.isSimulcast {
		return
	}
	w.upTrackMu.Lock()
	defer w.upTrackMu.Unlock()
	for _, t := range w.trackers {
		if t == nil {
			continue
		}
		t.SetPaused(paused)
	}
}
// AddDownTrack attaches a subscriber DownTrack to this receiver, choosing an
// initial spatial layer (for simulcast) and storing it for packet fan-out.
// A peer that already has a down track on this receiver is ignored.
func (w *WebRTCReceiver) AddDownTrack(track *DownTrack, bestQualityFirst bool) {
	if w.closed.get() {
		return
	}
	layer := 0
	w.downTrackMu.RLock()
	_, ok := w.index[track.peerID]
	w.downTrackMu.RUnlock()
	if ok {
		// This peer already subscribes to the track.
		return
	}
	if w.isSimulcast {
		// Pick the starting layer among the currently available up tracks:
		// lowest available when !bestQualityFirst, otherwise highest.
		w.upTrackMu.RLock()
		for i, t := range w.upTracks {
			if t != nil && w.HasSpatialLayer(int32(i)) {
				layer = i
				if !bestQualityFirst {
					break
				}
			}
		}
		w.upTrackMu.RUnlock()
		track.SetInitialLayers(int32(layer), 2)
		track.maxSpatialLayer.set(2)
		track.maxTemporalLayer.set(2)
		track.lastSSRC.set(w.SSRC(layer))
		track.trackType = SimulcastDownTrack
		track.payload = packetFactory.Get().(*[]byte)
	} else {
		// LK-TODO-START
		// check if any webrtc client does more than one temporal layer when not simulcasting.
		// Maybe okay to just set the max temporal layer to 2 even in this case.
		// Don't think there is any harm is setting it at 2 if the upper layers are
		// not going to be there.
		// LK-TODO-END
		track.SetInitialLayers(0, 0)
		track.trackType = SimpleDownTrack
	}
	w.storeDownTrack(track)
}
// HasSpatialLayer reports whether the given spatial layer is currently
// receiving data. Reads the availableLayers snapshot lock-free.
func (w *WebRTCReceiver) HasSpatialLayer(layer int32) bool {
	available, ok := w.availableLayers.Load().([]uint16)
	if !ok {
		return false
	}
	want := uint16(layer)
	for _, have := range available {
		if have == want {
			return true
		}
	}
	return false
}
// downtrackLayerChange notifies every down track that the set of available
// layers changed; layerAdded distinguishes additions from removals.
func (w *WebRTCReceiver) downtrackLayerChange(layers []uint16, layerAdded bool) {
	w.downTrackMu.RLock()
	defer w.downTrackMu.RUnlock()
	for _, track := range w.downTracks {
		if track == nil {
			continue
		}
		_, _ = track.UptrackLayersChange(layers, layerAdded)
	}
}
// addAvailableLayer records that a simulcast layer is receiving data and,
// when updateDownTrack is set, notifies down tracks of the addition.
// Writers are serialized by upTrackMu; readers load availableLayers
// lock-free through atomic.Value.
func (w *WebRTCReceiver) addAvailableLayer(layer uint16, updateDownTrack bool) {
	w.upTrackMu.Lock()
	layers, ok := w.availableLayers.Load().([]uint16)
	if !ok {
		layers = []uint16{}
	}
	hasLayer := false
	for _, l := range layers {
		if l == layer {
			hasLayer = true
			break
		}
	}
	if !hasLayer {
		layers = append(layers, layer)
	}
	w.availableLayers.Store(layers)
	w.upTrackMu.Unlock()
	if updateDownTrack {
		w.downtrackLayerChange(layers, true)
	}
}
// removeAvailableLayer marks a simulcast layer as no longer receiving data
// and immediately notifies down tracks so they can switch off it. A fresh
// slice is stored so concurrent lock-free readers keep a consistent view.
func (w *WebRTCReceiver) removeAvailableLayer(layer uint16) {
	w.upTrackMu.Lock()
	layers, ok := w.availableLayers.Load().([]uint16)
	if !ok {
		w.upTrackMu.Unlock()
		return
	}
	newLayers := make([]uint16, 0, 3)
	for _, l := range layers {
		if l != layer {
			newLayers = append(newLayers, l)
		}
	}
	w.availableLayers.Store(newLayers)
	w.upTrackMu.Unlock()
	// need to immediately switch off unavailable layers
	w.downtrackLayerChange(newLayers, false)
}
// GetBitrate returns the measured bitrate per spatial layer; layers without
// a buffer or without availability report 0.
func (w *WebRTCReceiver) GetBitrate() [3]uint64 {
	var rates [3]uint64
	w.bufferMu.RLock()
	defer w.bufferMu.RUnlock()
	for layer, buff := range w.buffers {
		if buff == nil {
			continue
		}
		if !w.HasSpatialLayer(int32(layer)) {
			continue // unavailable layers report 0
		}
		rates[layer] = buff.Bitrate()
	}
	return rates
}
// GetBitrateTemporal returns per-spatial, per-temporal-layer bitrates.
// Unavailable spatial layers report zeros.
func (w *WebRTCReceiver) GetBitrateTemporal() [3][4]uint64 {
	var br [3][4]uint64
	w.bufferMu.RLock()
	defer w.bufferMu.RUnlock()
	for i, buff := range w.buffers {
		if buff != nil {
			tls := make([]uint64, 4)
			if w.HasSpatialLayer(int32(i)) {
				// NOTE(review): assumes BitrateTemporal() returns at least 4
				// entries; confirm against sfu/buffer, otherwise the copy
				// below would index out of range.
				tls = buff.BitrateTemporal()
			}
			for j := 0; j < len(br[i]); j++ {
				br[i][j] = tls[j]
			}
		}
	}
	return br
}
// GetBitrateTemporalCumulative returns per-spatial-layer bitrates cumulative
// across temporal layers. Unavailable spatial layers report zeros.
// LK-TODO: For SVC tracks, need to accumulate across spatial layers also
func (w *WebRTCReceiver) GetBitrateTemporalCumulative() [3][4]uint64 {
	var br [3][4]uint64
	w.bufferMu.RLock()
	defer w.bufferMu.RUnlock()
	for i, buff := range w.buffers {
		if buff != nil {
			tls := make([]uint64, 4)
			if w.HasSpatialLayer(int32(i)) {
				// NOTE(review): assumes BitrateTemporalCumulative() returns at
				// least 4 entries — confirm against sfu/buffer.
				tls = buff.BitrateTemporalCumulative()
			}
			for j := 0; j < len(br[i]); j++ {
				br[i][j] = tls[j]
			}
		}
	}
	return br
}
// GetMaxTemporalLayer returns the max temporal layer seen per spatial layer.
func (w *WebRTCReceiver) GetMaxTemporalLayer() [3]int32 {
	var layers [3]int32
	w.bufferMu.RLock()
	defer w.bufferMu.RUnlock()
	for i := range w.buffers {
		if w.buffers[i] == nil {
			continue
		}
		layers[i] = w.buffers[i].MaxTemporalLayer()
	}
	return layers
}
// OnCloseHandler registers fn to run when the remote track is removed and
// all down tracks have been closed.
func (w *WebRTCReceiver) OnCloseHandler(fn func()) {
	w.onCloseHandler = fn
}
// DeleteDownTrack removes a DownTrack from a Receiver.
// The slot is nilled and recorded as free so storeDownTrack can reuse it
// without shifting the slice (and without invalidating other indices).
func (w *WebRTCReceiver) DeleteDownTrack(peerID string) {
	if w.closed.get() {
		return
	}
	w.downTrackMu.Lock()
	defer w.downTrackMu.Unlock()
	idx, ok := w.index[peerID]
	if !ok {
		return
	}
	delete(w.index, peerID)
	w.downTracks[idx] = nil
	w.free[idx] = struct{}{}
}
// SendRTCP forwards RTCP packets to the receiver's RTCP channel. PLIs are
// throttled to at most one per pliThrottle interval.
// NOTE(review): assumes p is non-empty — p[0] would panic otherwise.
func (w *WebRTCReceiver) SendRTCP(p []rtcp.Packet) {
	if _, ok := p[0].(*rtcp.PictureLossIndication); ok {
		w.rtcpMu.Lock()
		defer w.rtcpMu.Unlock()
		// Drop the PLI when the last one was sent too recently.
		if time.Now().UnixNano()-w.lastPli.get() < w.pliThrottle {
			return
		}
		w.lastPli.set(time.Now().UnixNano())
	}
	w.rtcpCh <- p
}
// SetRTCPCh installs the channel used by SendRTCP to emit RTCP packets.
func (w *WebRTCReceiver) SetRTCPCh(ch chan []rtcp.Packet) {
	w.rtcpCh = ch
}
// GetSenderReportTime returns the RTP and NTP timestamps from the last RTCP
// sender report recorded for the given layer; zeros when no buffer exists.
func (w *WebRTCReceiver) GetSenderReportTime(layer int32) (rtpTS uint32, ntpTS uint64) {
	w.bufferMu.RLock()
	defer w.bufferMu.RUnlock()
	buff := w.buffers[layer]
	if buff == nil {
		return 0, 0
	}
	rtpTS, ntpTS, _ = buff.GetSenderReportData()
	return rtpTS, ntpTS
}
// RetransmitPackets answers NACKs for a down track: each requested packet is
// re-read from the layer's buffer, rewritten with the down track's SSRC,
// payload type and remapped sequence number/timestamp, then sent. Work is
// serialized on the single-worker nackWorker pool.
func (w *WebRTCReceiver) RetransmitPackets(track *DownTrack, packets []packetMeta) error {
	if w.nackWorker.Stopped() {
		return io.ErrClosedPipe
	}
	// LK-TODO: should move down track specific bits into there
	w.nackWorker.Submit(func() {
		// Borrow a scratch buffer from the shared pool; returned below.
		src := packetFactory.Get().(*[]byte)
		for _, meta := range packets {
			pktBuff := *src
			w.bufferMu.RLock()
			buff := w.buffers[meta.layer]
			w.bufferMu.RUnlock()
			if buff == nil {
				break
			}
			i, err := buff.GetPacket(pktBuff, meta.sourceSeqNo)
			if err != nil {
				if err == io.EOF {
					break
				}
				continue
			}
			var pkt rtp.Packet
			if err = pkt.Unmarshal(pktBuff[:i]); err != nil {
				continue
			}
			// Rewrite the header so the packet belongs to the down track's
			// outgoing stream.
			pkt.Header.SequenceNumber = meta.targetSeqNo
			pkt.Header.Timestamp = meta.timestamp
			pkt.Header.SSRC = track.ssrc
			pkt.Header.PayloadType = track.payloadType
			err = track.MaybeTranslateVP8(&pkt, meta)
			if err != nil {
				Logger.Error(err, "translating VP8 packet err")
				continue
			}
			err = track.WriteRTPHeaderExtensions(&pkt.Header)
			if err != nil {
				Logger.Error(err, "writing rtp header extensions err")
				continue
			}
			if _, err = track.writeStream.WriteRTP(&pkt.Header, pkt.Payload); err != nil {
				Logger.Error(err, "Writing rtx packet err")
			} else {
				track.UpdateStats(uint32(i))
			}
		}
		packetFactory.Put(src)
	})
	return nil
}
// forwardRTP is the per-layer forwarding loop: it reads packets from the
// layer's buffer and writes them to every down track, either serially or —
// when the down track count exceeds lbThreshold — sharded across numProcs
// goroutines. The loop exits (and closes the receiver) when the buffer
// reports io.EOF.
func (w *WebRTCReceiver) forwardRTP(layer int32) {
	tracker := w.trackers[layer]
	defer func() {
		// First forwarding loop to exit tears the whole receiver down.
		w.closeOnce.Do(func() {
			w.closed.set(true)
			w.closeTracks()
		})
		if tracker != nil {
			tracker.Stop()
		}
	}()
	// Pre-built PLI request reused every time a down track needs a keyframe.
	pli := []rtcp.Packet{
		&rtcp.PictureLossIndication{SenderSSRC: rand.Uint32(), MediaSSRC: w.SSRC(int(layer))},
	}
	for {
		w.bufferMu.RLock()
		pkt, err := w.buffers[layer].ReadExtended()
		w.bufferMu.RUnlock()
		if err == io.EOF {
			return
		}
		// NOTE(review): only io.EOF terminates the loop — presumably
		// ReadExtended returns io.EOF as its only error; confirm, otherwise
		// pkt could be nil below.
		if tracker != nil {
			tracker.Observe(pkt.Packet.SequenceNumber)
		}
		w.downTrackMu.RLock()
		if w.lbThreshold == 0 || len(w.downTracks)-len(w.free) < w.lbThreshold {
			// serial - not enough down tracks for parallelization to outweigh overhead
			for _, dt := range w.downTracks {
				if dt != nil {
					w.writeRTP(layer, dt, pkt, pli)
				}
			}
		} else {
			// parallel - enables much more efficient multi-core utilization
			start := uint64(0)
			end := uint64(len(w.downTracks))
			// 100µs is enough to amortize the overhead and provide sufficient load balancing.
			// WriteRTP takes about 50µs on average, so we write to 2 down tracks per loop.
			step := uint64(2)
			var wg sync.WaitGroup
			wg.Add(w.numProcs)
			for p := 0; p < w.numProcs; p++ {
				go func() {
					defer wg.Done()
					for {
						// Claim the next chunk [n-step, n) via the shared cursor.
						n := atomic.AddUint64(&start, step)
						if n >= end+step {
							return
						}
						for i := n - step; i < n && i < end; i++ {
							if dt := w.downTracks[i]; dt != nil {
								w.writeRTP(layer, dt, pkt, pli)
							}
						}
					}
				}()
			}
			wg.Wait()
		}
		w.downTrackMu.RUnlock()
	}
}
// writeRTP forwards one packet to a single down track. For simulcast it
// gates layer switches on keyframes (requesting a PLI while waiting) and
// drops packets from layers the down track is not currently on.
func (w *WebRTCReceiver) writeRTP(layer int32, dt *DownTrack, pkt *buffer.ExtPacket, pli []rtcp.Packet) {
	// LK-TODO-START
	// Ideally this code should also be moved into the DownTrack
	// structure to keep things modular. Let the down track code
	// make decision on forwarding or not
	// LK-TODO-END
	if w.isSimulcast {
		targetLayer := dt.TargetSpatialLayer()
		currentLayer := dt.CurrentSpatialLayer()
		if targetLayer == layer && currentLayer != targetLayer {
			// Switch completes only on a keyframe; until then keep asking
			// the publisher for one.
			if pkt.KeyFrame {
				dt.SwitchSpatialLayerDone(targetLayer)
				currentLayer = targetLayer
			} else {
				dt.lastPli.set(time.Now().UnixNano())
				w.SendRTCP(pli)
			}
		}
		// LK-TODO-START
		// Probably need a control here to stop forwarding current layer
		// if the current layer is higher than target layer, i. e. target layer
		// could have been switched down due to bandwidth constraints and
		// continuing to forward higher layer is only going to exacerbate the issue.
		// Note that the client might have also requested a lower layer. So, it
		// would nice to distinguish between client requested downgrade vs bandwidth
		// constrained downgrade and stop higher layer only in the bandwidth
		// constrained case.
		// LK-TODO-END
		if currentLayer != layer {
			dt.pktsDropped.add(1)
			return
		}
	}
	if err := dt.WriteRTP(pkt, layer); err != nil {
		log.Error().Err(err).Str("id", dt.id).Msg("Error writing to down track")
	}
}
// closeTracks closes all down tracks, resets the slot bookkeeping, stops the
// NACK worker (waiting for in-flight retransmissions) and finally fires the
// registered close handler.
func (w *WebRTCReceiver) closeTracks() {
	w.downTrackMu.Lock()
	for _, dt := range w.downTracks {
		if dt != nil {
			dt.Close()
		}
	}
	w.downTracks = make([]*DownTrack, 0)
	w.index = make(map[string]int)
	w.free = make(map[int]struct{})
	w.downTrackMu.Unlock()
	// StopWait blocks until queued NACK jobs finish.
	w.nackWorker.StopWait()
	if w.onCloseHandler != nil {
		w.onCloseHandler()
	}
}
// storeDownTrack inserts track into the first free slot, or appends when
// none is free. Slots are keyed by track.peerID in the index map.
func (w *WebRTCReceiver) storeDownTrack(track *DownTrack) {
	w.downTrackMu.Lock()
	defer w.downTrackMu.Unlock()
	// Reuse an arbitrary freed slot if one exists; the early return makes
	// this range execute at most one iteration.
	for idx := range w.free {
		w.index[track.peerID] = idx
		w.downTracks[idx] = track
		delete(w.free, idx)
		return
	}
	w.index[track.peerID] = len(w.downTracks)
	w.downTracks = append(w.downTracks, track)
}
// DebugInfo returns a diagnostic snapshot of the receiver: simulcast flag,
// timestamp (unix nanos) of the last forwarded PLI, and per-layer up-track
// details.
func (w *WebRTCReceiver) DebugInfo() map[string]interface{} {
	info := map[string]interface{}{
		"Simulcast": w.isSimulcast,
		// Store the int64 value rather than a copy of the atomicInt64
		// wrapper; copying the wrapper bypasses its accessors and renders
		// as a struct in debug output.
		"LastPli": w.lastPli.get(),
	}
	w.upTrackMu.RLock()
	upTrackInfo := make([]map[string]interface{}, 0, len(w.upTracks))
	for layer, ut := range w.upTracks {
		if ut != nil {
			upTrackInfo = append(upTrackInfo, map[string]interface{}{
				"Layer": layer,
				"SSRC":  ut.SSRC(),
				"Msid":  ut.Msid(),
				"RID":   ut.RID(),
			})
		}
	}
	w.upTrackMu.RUnlock()
	info["UpTracks"] = upTrackInfo
	return info
}

199
pkg/sfu/receiver_test.go Normal file
View File

@@ -0,0 +1,199 @@
package sfu
import (
"fmt"
"hash/fnv"
"math/rand"
"runtime"
"sync"
"sync/atomic"
"testing"
"github.com/gammazero/workerpool"
"github.com/stretchr/testify/assert"
)
// TestWebRTCReceiver_OnCloseHandler verifies that OnCloseHandler stores the
// provided callback on the receiver.
func TestWebRTCReceiver_OnCloseHandler(t *testing.T) {
	type args struct {
		fn func()
	}
	tests := []struct {
		name string
		args args
	}{
		{
			name: "Must set on close handler function",
			args: args{
				fn: func() {},
			},
		},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the parallel-safe subtest closure
		t.Run(tt.name, func(t *testing.T) {
			w := &WebRTCReceiver{}
			w.OnCloseHandler(tt.args.fn)
			assert.NotNil(t, w.onCloseHandler)
		})
	}
}
// BenchmarkWriteRTP compares strategies for fanning one packet out to c down
// tracks: serial, worker pool (two partitionings), raw goroutines, and the
// atomic-cursor load-balanced scheme used by forwardRTP.
func BenchmarkWriteRTP(b *testing.B) {
	cases := []int{1, 2, 5, 10, 100, 250, 500}
	workers := runtime.NumCPU()
	wp := workerpool.New(workers)
	for _, c := range cases {
		// fills each bucket with a max of 50, i.e. []int{50, 50} for c=100
		fill := make([]int, 0)
		for i := 50; ; i += 50 {
			if i > c {
				fill = append(fill, c%50)
				break
			}
			fill = append(fill, 50)
			if i == c {
				break
			}
		}
		// splits c into numCPU buckets, i.e. []int{9, 9, 9, 9, 8, 8, 8, 8, 8, 8, 8, 8} for 12 cpus and c=100
		split := make([]int, workers)
		for i := range split {
			split[i] = c / workers
		}
		for i := 0; i < c%workers; i++ {
			split[i]++
		}
		b.Run(fmt.Sprintf("%d-Downtracks/Control", c), func(b *testing.B) {
			benchmarkNoPool(b, c)
		})
		b.Run(fmt.Sprintf("%d-Downtracks/Pool(Fill)", c), func(b *testing.B) {
			benchmarkPool(b, wp, fill)
		})
		b.Run(fmt.Sprintf("%d-Downtracks/Pool(Hash)", c), func(b *testing.B) {
			benchmarkPool(b, wp, split)
		})
		b.Run(fmt.Sprintf("%d-Downtracks/Goroutines", c), func(b *testing.B) {
			benchmarkGoroutine(b, split)
		})
		b.Run(fmt.Sprintf("%d-Downtracks/LoadBalanced", c), func(b *testing.B) {
			benchmarkLoadBalanced(b, workers, 2, c)
		})
		b.Run(fmt.Sprintf("%d-Downtracks/LBPool", c), func(b *testing.B) {
			benchmarkLoadBalancedPool(b, wp, workers, 2, c)
		})
	}
}
// benchmarkNoPool writes to every down track serially on one goroutine.
func benchmarkNoPool(b *testing.B, downTracks int) {
	for n := 0; n < b.N; n++ {
		for i := 0; i < downTracks; i++ {
			writeRTP()
		}
	}
}
// benchmarkPool distributes writes over a shared worker pool; each bucket of
// down tracks becomes one submitted task.
func benchmarkPool(b *testing.B, wp *workerpool.WorkerPool, buckets []int) {
	for n := 0; n < b.N; n++ {
		var wg sync.WaitGroup
		for _, bucket := range buckets {
			if bucket == 0 {
				continue
			}
			count := bucket // per-iteration copy for the closure
			wg.Add(1)
			wp.Submit(func() {
				defer wg.Done()
				for k := 0; k < count; k++ {
					writeRTP()
				}
			})
		}
		wg.Wait()
	}
}
// benchmarkGoroutine spawns one fresh goroutine per non-empty bucket.
func benchmarkGoroutine(b *testing.B, buckets []int) {
	for n := 0; n < b.N; n++ {
		var wg sync.WaitGroup
		for _, bucket := range buckets {
			if bucket == 0 {
				continue
			}
			count := bucket // per-iteration copy for the closure
			wg.Add(1)
			go func() {
				defer wg.Done()
				for k := 0; k < count; k++ {
					writeRTP()
				}
			}()
		}
		wg.Wait()
	}
}
// benchmarkLoadBalanced spreads downTracks writes across numProcs goroutines
// that claim work in chunks of `step` via a shared atomic cursor — the same
// scheme WebRTCReceiver.forwardRTP uses.
func benchmarkLoadBalanced(b *testing.B, numProcs, step, downTracks int) {
	for i := 0; i < b.N; i++ {
		start := uint64(0)
		step := uint64(step) // shadow as uint64 for atomic arithmetic
		end := uint64(downTracks)
		var wg sync.WaitGroup
		wg.Add(numProcs)
		for p := 0; p < numProcs; p++ {
			go func() {
				defer wg.Done()
				for {
					// Claim the chunk [n-step, n).
					n := atomic.AddUint64(&start, step)
					if n >= end+step {
						return
					}
					for i := n - step; i < n && i < end; i++ {
						writeRTP()
					}
				}
			}()
		}
		wg.Wait()
	}
}
// benchmarkLoadBalancedPool is benchmarkLoadBalanced with worker-pool tasks
// instead of fresh goroutines.
func benchmarkLoadBalancedPool(b *testing.B, wp *workerpool.WorkerPool, numProcs, step, downTracks int) {
	for i := 0; i < b.N; i++ {
		start := uint64(0)
		step := uint64(step) // shadow as uint64 for atomic arithmetic
		end := uint64(downTracks)
		var wg sync.WaitGroup
		wg.Add(numProcs)
		for p := 0; p < numProcs; p++ {
			wp.Submit(func() {
				defer wg.Done()
				for {
					// Claim the chunk [n-step, n).
					n := atomic.AddUint64(&start, step)
					if n >= end+step {
						return
					}
					for i := n - step; i < n && i < end; i++ {
						writeRTP()
					}
				}
			})
		}
		wg.Wait()
	}
}
// writeRTP simulates the CPU cost of one down-track write by folding a small
// payload through FNV-128 a pseudo-random (1900-2099) number of times.
func writeRTP() {
	payload := []byte("simulate some work")
	iterations := 1900 + rand.Intn(200)
	for i := 0; i < iterations; i++ {
		hasher := fnv.New128()
		payload = hasher.Sum(payload)
	}
}

13
pkg/sfu/relay.go Normal file
View File

@@ -0,0 +1,13 @@
package sfu
// RelayWithFanOutDataChannels enables mirroring of the session's fan-out
// data channels on the relayed peer.
func RelayWithFanOutDataChannels() func(r *relayPeer) {
	return func(r *relayPeer) {
		r.relayFanOutDataChannels = true
	}
}
// RelayWithSenderReports enables periodic RTCP sender reports over the relay.
func RelayWithSenderReports() func(r *relayPeer) {
	return func(r *relayPeer) {
		r.withSRReports = true
	}
}

81
pkg/sfu/relay/README.md Normal file
View File

@@ -0,0 +1,81 @@
# Relay
`ion-sfu` supports relaying tracks to other ion-SFUs or other services using the ORTC API.
Using this API allows you to quickly send the stream to other services by signaling a single request; after that, all subsequent negotiations are handled internally.
## API
### Relay Peer
The relay peer shares common methods with the WebRTC PeerConnection, so it should be straightforward to use. To create a new relay peer, follow the example below:
```go
// Meta holds all the related information of the peer you want to relay.
meta := PeerMeta{
PeerID : "super-villain-1",
SessionID : "world-domination",
}
// config will hold pion/webrtc related structs required for the connection.
// you should fill according your requirements or leave the defaults.
config := &PeerConfig{}
peer, err := NewPeer(meta, config)
handleErr(err)
// Now before working with the peer you need to signal the peer to
// your remote server; the signaling can use whatever method you want (gRPC, REST, pubsub, etc.)
signalFunc= func (meta PeerMeta, signal []byte) ([]byte, error){
if meta.session== "world-domination"{
return RelayToLegionOfDoom(meta, signal)
}
return nil, errors.New("not supported")
}
// The remote peer should create a new Relay Peer with the metadata and call Answer.
if err:= peer.Offer(signalFunc); err!=nil{
handleErr(err)
}
// If there are no errors, relay peer offer some convenience methods to communicate with
// Relayed peer.
// Emit will fire and forget to the request event
peer.Emit("evil-plan-1", data)
// Request will wait for a remote answer, use a time cancelled
// context to not block forever if peer does not answer
ans,err:= peer.Request(ctx, "evil-plan-2", data)
// To listen to remote event just attach the callback to peer
peer.OnRequest( func (event string, msg Message){
// to access the request data
msg.Payload()
// to reply the request
msg.Reply(...)
})
// The Relay Peer also has some convenience callbacks to manage the peer lifespan.
// Peer OnClose is called when the remote peer connection is closed, or the Close method is called
peer.OnClose(func())
// Peer OnReady is called when the relay peer is ready to start negotiating tracks, data channels and request
// is highly recommended to attach all the initialization logic to this callback
peer.OnReady(func())
// To add or receive tracks or data channels the API is similar to webrtc Peer Connection, just listen
// to the required callbacks
peer.OnDataChannel(f func(channel *webrtc.DataChannel))
peer.OnTrack(f func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver))
// Make sure to call below methods after the OnReady callback fired.
peer.CreateDataChannel(label string)
peer.AddTrack(receiver *webrtc.RTPReceiver, remoteTrack *webrtc.TrackRemote,
localTrack webrtc.TrackLocal) (*webrtc.RTPSender, error)
```
### ION-SFU integration
ION-SFU offers some convenience methods for relaying peers in a very simple way.
To relay a peer just call `Peer.Publisher().Relay(...)` then signal the data to the remote SFU and ingest the data using:
`session.AddRelayPeer(peerID string, signalData []byte) ([]byte, error)`
Set the []byte response from the method as the response of the signaling. Once that is done, every time a peer joins, the new SFU will negotiate the relayed stream.

657
pkg/sfu/relay/relay.go Normal file
View File

@@ -0,0 +1,657 @@
package relay
import (
"context"
"encoding/json"
"errors"
"fmt"
"math/rand"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/go-logr/logr"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
)
const (
	// signalerLabel is the reserved label of the internal data channel the two
	// relay peers use to exchange signaling requests.
	signalerLabel = "ion_sfu_relay_signaler"
	// signalerRequestEvent is the event name used for track-negotiation
	// requests sent over the signaling data channel.
	signalerRequestEvent = "ion_relay_request"
)

var (
	// ErrRelayPeerNotReady is returned when an operation requires an
	// established transport but the peer has not finished connecting.
	ErrRelayPeerNotReady = errors.New("relay Peer is not ready")
	// ErrRelayPeerSignalDone is returned when Offer or Answer is called more
	// than once on the same peer.
	ErrRelayPeerSignalDone = errors.New("relay Peer signal already called")
	// ErrRelaySignalDCNotReady is returned when the signaling data channel is
	// not yet open.
	ErrRelaySignalDCNotReady = errors.New("relay Peer data channel is not ready")
)
// signal is the JSON payload exchanged between the two relay peers, both for
// the initial transport handshake (ICE/DTLS/SCTP parameters) and for
// per-track negotiation (Encodings + TrackMeta).
type signal struct {
	Encodings        *webrtc.RTPCodingParameters `json:"encodings,omitempty"`
	ICECandidates    []webrtc.ICECandidate       `json:"iceCandidates,omitempty"`
	ICEParameters    webrtc.ICEParameters        `json:"iceParameters,omitempty"`
	DTLSParameters   webrtc.DTLSParameters       `json:"dtlsParameters,omitempty"`
	SCTPCapabilities *webrtc.SCTPCapabilities    `json:"sctpCapabilities,omitempty"`
	TrackMeta        *TrackMeta                  `json:"trackInfo,omitempty"`
}

// request is the message envelope sent over the signaling data channel.
// A reply reuses the ID of the request it answers and sets IsReply.
type request struct {
	ID      uint64 `json:"id"`
	IsReply bool   `json:"reply"`
	Event   string `json:"event"`
	Payload []byte `json:"payload"`
}

// TrackMeta carries the identifiers and codec of a relayed track so the
// receiving side can reconstruct it.
type TrackMeta struct {
	StreamID        string                     `json:"streamId"`
	TrackID         string                     `json:"trackId"`
	CodecParameters *webrtc.RTPCodecParameters `json:"codecParameters,omitempty"`
}
// PeerConfig holds the pion/webrtc dependencies used to build a relay Peer.
type PeerConfig struct {
	// SettingEngine customizes low-level pion transport behavior.
	SettingEngine webrtc.SettingEngine
	// ICEServers are the STUN/TURN servers used during candidate gathering.
	ICEServers []webrtc.ICEServer
	// Logger receives relay diagnostics.
	Logger logr.Logger
}

// PeerMeta identifies the relayed peer and the session it belongs to; it is
// passed verbatim to the remote side's signaling handler.
type PeerMeta struct {
	PeerID    string `json:"peerId"`
	SessionID string `json:"sessionId"`
}

// Options configures which data channels are forwarded to the relayed peer.
type Options struct {
	// RelayMiddlewareDC if set to true middleware data channels will be created and forwarded
	// to the relayed peer
	RelayMiddlewareDC bool
	// RelaySessionDC if set to true fanout data channels will be created and forwarded to the
	// relayed peer
	RelaySessionDC bool
}
// Peer is an ORTC-style relay peer connecting two SFU instances. It is built
// from raw ICE/DTLS/SCTP transports (no SDP) and carries both media and an
// internal signaling data channel.
type Peer struct {
	mu   sync.Mutex // guards track/DC setup (ready, dcIndex, senders, ...)
	rmu  sync.Mutex // guards pendingRequests
	me   *webrtc.MediaEngine
	log  logr.Logger
	api  *webrtc.API
	ice  *webrtc.ICETransport
	rand *rand.Rand // source of request IDs
	meta PeerMeta
	sctp *webrtc.SCTPTransport
	dtls *webrtc.DTLSTransport
	// role is ICERoleControlling on the offering side, ICERoleControlled on
	// the answering side.
	role            *webrtc.ICERole
	ready           bool
	senders         []*webrtc.RTPSender
	receivers       []*webrtc.RTPReceiver
	pendingRequests map[uint64]chan []byte // request ID -> reply channel
	localTracks     []webrtc.TrackLocal
	signalingDC     *webrtc.DataChannel // internal channel labeled signalerLabel
	gatherer        *webrtc.ICEGatherer
	dcIndex         uint16 // next SCTP stream ID to assign to a data channel

	onReady       atomic.Value // func()
	onClose       atomic.Value // func()
	onRequest     atomic.Value // func(event string, message Message)
	onDataChannel atomic.Value // func(channel *webrtc.DataChannel)
	onTrack       atomic.Value // func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver, meta *TrackMeta)
}
// NewPeer creates a relay Peer for the given metadata. The returned peer is
// not connected yet: call Offer (initiator) or Answer (responder) to
// establish the ICE/DTLS/SCTP transports.
func NewPeer(meta PeerMeta, conf *PeerConfig) (*Peer, error) {
	// Prepare ICE gathering options.
	iceOptions := webrtc.ICEGatherOptions{
		ICEServers: conf.ICEServers,
	}
	me := webrtc.MediaEngine{}
	// Create an API object.
	api := webrtc.NewAPI(webrtc.WithMediaEngine(&me), webrtc.WithSettingEngine(conf.SettingEngine))
	// Create the ICE gatherer.
	gatherer, err := api.NewICEGatherer(iceOptions)
	if err != nil {
		return nil, err
	}
	// Construct the ICE transport.
	i := api.NewICETransport(gatherer)
	// Construct the DTLS transport. Check the error before layering the SCTP
	// transport on top so we never build SCTP over a nil DTLS transport
	// (previously the error was only checked after NewSCTPTransport).
	dtls, err := api.NewDTLSTransport(i, nil)
	if err != nil {
		return nil, err
	}
	// Construct the SCTP transport.
	sctp := api.NewSCTPTransport(dtls)
	p := &Peer{
		me:              &me,
		api:             api,
		log:             conf.Logger,
		ice:             i,
		rand:            rand.New(rand.NewSource(time.Now().UnixNano())),
		meta:            meta,
		sctp:            sctp,
		dtls:            dtls,
		gatherer:        gatherer,
		pendingRequests: make(map[uint64]chan []byte),
	}
	sctp.OnDataChannel(func(channel *webrtc.DataChannel) {
		// The signaling channel is consumed internally; any other channel is
		// surfaced through the OnDataChannel callback.
		if channel.Label() == signalerLabel {
			p.signalingDC = channel
			channel.OnMessage(p.handleRequest)
			channel.OnOpen(func() {
				if f := p.onReady.Load(); f != nil {
					f.(func())()
				}
			})
			return
		}
		if f := p.onDataChannel.Load(); f != nil {
			f.(func(dataChannel *webrtc.DataChannel))(channel)
		}
	})
	i.OnConnectionStateChange(func(state webrtc.ICETransportState) {
		if state == webrtc.ICETransportStateFailed || state == webrtc.ICETransportStateDisconnected {
			// Use a locally scoped error: this callback fires long after
			// NewPeer has returned, so it must not write the constructor's
			// err variable (that was a data race).
			if cerr := p.Close(); cerr != nil {
				p.log.Error(cerr, "Closing relayed p error")
			}
		}
	})
	return p, nil
}
// ID returns the relayed peer's ID as provided in PeerMeta.
func (p *Peer) ID() string {
	return p.meta.PeerID
}
// Offer is used to establish the connection of the local relay Peer with the
// remote relay Peer. It gathers local ICE candidates, serializes the local
// transport parameters, hands them to signalFn for delivery to the remote
// side, and starts the transports with the remote side's answer.
//
// If connection is successful the OnReady handler will be called.
func (p *Peer) Offer(signalFn func(meta PeerMeta, signal []byte) ([]byte, error)) error {
	// Offer/Answer may only run once per peer.
	if p.gatherer.State() != webrtc.ICEGathererStateNew {
		return ErrRelayPeerSignalDone
	}
	ls := &signal{}
	gatherFinished := make(chan struct{})
	p.gatherer.OnLocalCandidate(func(i *webrtc.ICECandidate) {
		// A nil candidate marks the end of gathering.
		if i == nil {
			close(gatherFinished)
		}
	})
	// Gather candidates.
	if err := p.gatherer.Gather(); err != nil {
		return err
	}
	<-gatherFinished
	var err error
	if ls.ICECandidates, err = p.gatherer.GetLocalCandidates(); err != nil {
		return err
	}
	if ls.ICEParameters, err = p.gatherer.GetLocalParameters(); err != nil {
		return err
	}
	if ls.DTLSParameters, err = p.dtls.GetLocalParameters(); err != nil {
		return err
	}
	sc := p.sctp.GetCapabilities()
	ls.SCTPCapabilities = &sc
	// The offering side takes the controlling ICE role.
	role := webrtc.ICERoleControlling
	p.role = &role
	data, err := json.Marshal(ls)
	if err != nil {
		// Previously this error was silently overwritten by the signalFn call
		// below; surface it instead of signaling a garbage payload.
		return err
	}
	remoteSignal, err := signalFn(p.meta, data)
	if err != nil {
		return err
	}
	rs := &signal{}
	if err = json.Unmarshal(remoteSignal, rs); err != nil {
		return err
	}
	if err = p.start(rs); err != nil {
		return err
	}
	// The offerer creates the signaling data channel; OnReady fires once it
	// opens.
	if p.signalingDC, err = p.createDataChannel(signalerLabel); err != nil {
		return err
	}
	p.signalingDC.OnOpen(func() {
		if f := p.onReady.Load(); f != nil {
			f.(func())()
		}
	})
	p.signalingDC.OnMessage(p.handleRequest)
	return nil
}
// OnClose sets a callback that is called when the relay Peer is closed,
// either by the Close method or by transport failure/disconnection.
func (p *Peer) OnClose(fn func()) {
	p.onClose.Store(fn)
}
// Answer answers the remote Peer's signal request: it gathers local
// candidates, starts the transports against the remote parameters in the
// background, and returns the serialized local parameters to send back.
func (p *Peer) Answer(request []byte) ([]byte, error) {
	// Offer/Answer may only run once per peer.
	if p.gatherer.State() != webrtc.ICEGathererStateNew {
		return nil, ErrRelayPeerSignalDone
	}
	ls := &signal{}
	gatherFinished := make(chan struct{})
	p.gatherer.OnLocalCandidate(func(i *webrtc.ICECandidate) {
		// A nil candidate marks the end of gathering.
		if i == nil {
			close(gatherFinished)
		}
	})
	// Gather candidates.
	if err := p.gatherer.Gather(); err != nil {
		return nil, err
	}
	<-gatherFinished
	var err error
	if ls.ICECandidates, err = p.gatherer.GetLocalCandidates(); err != nil {
		return nil, err
	}
	if ls.ICEParameters, err = p.gatherer.GetLocalParameters(); err != nil {
		return nil, err
	}
	if ls.DTLSParameters, err = p.dtls.GetLocalParameters(); err != nil {
		return nil, err
	}
	sc := p.sctp.GetCapabilities()
	ls.SCTPCapabilities = &sc
	// The answering side takes the controlled ICE role.
	role := webrtc.ICERoleControlled
	p.role = &role
	rs := &signal{}
	if err = json.Unmarshal(request, rs); err != nil {
		return nil, err
	}
	go func() {
		// Use a goroutine-local error: writing the enclosing err after Answer
		// returns was a data race with the json.Marshal below.
		if serr := p.start(rs); serr != nil {
			p.log.Error(serr, "Error starting relay")
		}
	}()
	return json.Marshal(ls)
}
// WriteRTCP sends user-provided RTCP packets to the connected Peer over the
// DTLS transport. If no Peer is connected the packets are discarded.
func (p *Peer) WriteRTCP(pkts []rtcp.Packet) error {
	_, err := p.dtls.WriteRTCP(pkts)
	return err
}
// LocalTracks returns the tracks added to this peer via AddTrack.
// NOTE(review): the slice is returned without holding p.mu and without
// copying; concurrent AddTrack calls could race with iteration — confirm
// callers only read it after setup settles.
func (p *Peer) LocalTracks() []webrtc.TrackLocal {
	return p.localTracks
}
// OnReady calls the callback when the relay Peer is ready to start
// sending/receiving tracks and creating data channels.
func (p *Peer) OnReady(f func()) {
	p.onReady.Store(f)
}

// OnRequest calls the callback when the Peer gets a request message from the
// remote Peer on the signaling data channel.
func (p *Peer) OnRequest(f func(event string, msg Message)) {
	p.onRequest.Store(f)
}

// OnDataChannel sets an event handler which is invoked when a data
// channel arrives from the remote Peer (the internal signaling channel is
// filtered out).
func (p *Peer) OnDataChannel(f func(channel *webrtc.DataChannel)) {
	p.onDataChannel.Store(f)
}

// OnTrack sets an event handler which is called when a remote track
// arrives from the remote Peer, together with its relayed metadata.
func (p *Peer) OnTrack(f func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver, meta *TrackMeta)) {
	p.onTrack.Store(f)
}
// Close ends the relay Peer: it stops every sender and receiver, then the
// SCTP, DTLS and ICE transports, invokes the OnClose callback, and returns
// the accumulated stop errors (nil if none occurred).
func (p *Peer) Close() error {
	// Allocate with zero length — the original used make(len) which prepended
	// a run of nil placeholder errors before the appended ones.
	closeErrs := make([]error, 0, 3+len(p.senders)+len(p.receivers))
	for _, sdr := range p.senders {
		closeErrs = append(closeErrs, sdr.Stop())
	}
	for _, recv := range p.receivers {
		closeErrs = append(closeErrs, recv.Stop())
	}
	closeErrs = append(closeErrs, p.sctp.Stop(), p.dtls.Stop(), p.ice.Stop())
	if f := p.onClose.Load(); f != nil {
		f.(func())()
	}
	return joinErrs(closeErrs...)
}
// CreateDataChannel creates a new DataChannel object with the given label.
// It returns ErrRelayPeerNotReady until the transports have been started
// (see OnReady).
func (p *Peer) CreateDataChannel(label string) (*webrtc.DataChannel, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	if !p.ready {
		return nil, ErrRelayPeerNotReady
	}
	return p.createDataChannel(label)
}
// createDataChannel builds an ordered data channel on the SCTP transport,
// assigning the next sequential stream ID. Callers must hold p.mu.
func (p *Peer) createDataChannel(label string) (*webrtc.DataChannel, error) {
	idx := p.dcIndex
	// Bug fix: this was `p.dcIndex = +1` (assign positive one), which pinned
	// the counter at 1 and reused the same SCTP stream ID for every channel
	// after the second.
	p.dcIndex++
	dcParams := &webrtc.DataChannelParameters{
		Label:   label,
		ID:      &idx,
		Ordered: true,
	}
	return p.api.NewDataChannel(p.sctp, dcParams)
}
// start brings up the transports against the remote parameters, strictly in
// order: ICE, then DTLS over ICE, then SCTP over DTLS. On success the peer
// is marked ready for track/data-channel creation.
func (p *Peer) start(s *signal) error {
	if err := p.ice.SetRemoteCandidates(s.ICECandidates); err != nil {
		return err
	}
	if err := p.ice.Start(p.gatherer, s.ICEParameters, p.role); err != nil {
		return err
	}
	if err := p.dtls.Start(s.DTLSParameters); err != nil {
		return err
	}
	// SCTP is optional in the remote signal.
	if s.SCTPCapabilities != nil {
		if err := p.sctp.Start(*s.SCTPCapabilities); err != nil {
			return err
		}
	}
	// NOTE(review): ready is written here without p.mu while CreateDataChannel
	// reads it under the lock — confirm this ordering is benign.
	p.ready = true
	return nil
}
// receive sets up an RTPReceiver for a track announced by the remote peer via
// a signaling request: it registers the codec, starts reception on the
// signaled SSRC/RID/payload type, and hands the resulting track to the
// OnTrack callback.
func (p *Peer) receive(s *signal) error {
	// Derive the codec type from the MIME prefix of the relayed codec.
	var k webrtc.RTPCodecType
	switch {
	case strings.HasPrefix(s.TrackMeta.CodecParameters.MimeType, "audio/"):
		k = webrtc.RTPCodecTypeAudio
	case strings.HasPrefix(s.TrackMeta.CodecParameters.MimeType, "video/"):
		k = webrtc.RTPCodecTypeVideo
	default:
		k = webrtc.RTPCodecType(0)
	}
	if err := p.me.RegisterCodec(*s.TrackMeta.CodecParameters, k); err != nil {
		return err
	}
	recv, err := p.api.NewRTPReceiver(k, p.dtls)
	if err != nil {
		return err
	}
	if err = recv.Receive(webrtc.RTPReceiveParameters{Encodings: []webrtc.RTPDecodingParameters{
		{
			webrtc.RTPCodingParameters{
				RID:         s.Encodings.RID,
				SSRC:        s.Encodings.SSRC,
				PayloadType: s.Encodings.PayloadType,
			},
		},
	}}); err != nil {
		return err
	}
	recv.SetRTPParameters(webrtc.RTPParameters{
		HeaderExtensions: nil,
		Codecs:           []webrtc.RTPCodecParameters{*s.TrackMeta.CodecParameters},
	})
	track := recv.Track()
	if f := p.onTrack.Load(); f != nil {
		f.(func(remote *webrtc.TrackRemote, receiver *webrtc.RTPReceiver, meta *TrackMeta))(track, recv, s.TrackMeta)
	}
	p.receivers = append(p.receivers, recv)
	return nil
}
// AddTrack is used to negotiate a track to the remote peer: it announces the
// track's metadata and encoding over the signaling channel, waits (up to 2s)
// for the remote side to set up its receiver, then starts sending.
func (p *Peer) AddTrack(receiver *webrtc.RTPReceiver, remoteTrack *webrtc.TrackRemote,
	localTrack webrtc.TrackLocal) (*webrtc.RTPSender, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	codec := remoteTrack.Codec()
	sdr, err := p.api.NewRTPSender(localTrack, p.dtls)
	if err != nil {
		return nil, err
	}
	if err = p.me.RegisterCodec(codec, remoteTrack.Kind()); err != nil {
		return nil, err
	}
	// Announce the track (IDs, codec, local SSRC) to the remote peer.
	s := &signal{}
	s.TrackMeta = &TrackMeta{
		StreamID:        remoteTrack.StreamID(),
		TrackID:         remoteTrack.ID(),
		CodecParameters: &codec,
	}
	s.Encodings = &webrtc.RTPCodingParameters{
		SSRC:        sdr.GetParameters().Encodings[0].SSRC,
		PayloadType: remoteTrack.PayloadType(),
	}
	pld, err := json.Marshal(&s)
	if err != nil {
		return nil, err
	}
	// Block until the remote side acknowledges (or the 2s timeout elapses).
	ctx, cancel := context.WithTimeout(context.Background(), time.Second*2)
	defer cancel()
	if _, err = p.Request(ctx, signalerRequestEvent, pld); err != nil {
		return nil, err
	}
	params := receiver.GetParameters()
	// NOTE(review): a Send failure here is logged but the sender is still
	// returned and recorded as if successful — confirm this is intentional.
	if err = sdr.Send(webrtc.RTPSendParameters{
		RTPParameters: params,
		Encodings: []webrtc.RTPEncodingParameters{
			{
				webrtc.RTPCodingParameters{
					SSRC:        s.Encodings.SSRC,
					PayloadType: s.Encodings.PayloadType,
				},
			},
		},
	}); err != nil {
		p.log.Error(err, "Send RTPSender failed")
	}
	p.localTracks = append(p.localTracks, localTrack)
	p.senders = append(p.senders, sdr)
	return sdr, nil
}
// Emit sends a fire-and-forget event with the data argument to the remote
// peer over the signaling data channel. No reply is awaited; use Request for
// request/response semantics.
func (p *Peer) Emit(event string, data []byte) error {
	// Guard against use before the signaling channel exists (previously a nil
	// pointer dereference); ErrRelaySignalDCNotReady was declared for this.
	if p.signalingDC == nil {
		return ErrRelaySignalDCNotReady
	}
	req := request{
		ID:      p.rand.Uint64(),
		Event:   event,
		Payload: data,
	}
	msg, err := json.Marshal(req)
	if err != nil {
		return err
	}
	return p.signalingDC.Send(msg)
}
// Request sends an event with data to the remote peer and waits for the
// matching reply, or until ctx is cancelled. Use a time-bounded context to
// avoid blocking forever if the remote peer never answers.
func (p *Peer) Request(ctx context.Context, event string, data []byte) ([]byte, error) {
	// Guard against use before the signaling channel exists (previously a nil
	// pointer dereference).
	if p.signalingDC == nil {
		return nil, ErrRelaySignalDCNotReady
	}
	req := request{
		ID:      p.rand.Uint64(),
		Event:   event,
		Payload: data,
	}
	msg, err := json.Marshal(req)
	if err != nil {
		return nil, err
	}
	// Register the reply channel BEFORE sending: the original sent first,
	// so a fast reply could arrive in handleRequest before the channel was
	// in pendingRequests and be silently dropped.
	resp := make(chan []byte, 1)
	p.rmu.Lock()
	p.pendingRequests[req.ID] = resp
	p.rmu.Unlock()
	defer func() {
		p.rmu.Lock()
		delete(p.pendingRequests, req.ID)
		p.rmu.Unlock()
	}()
	if err = p.signalingDC.Send(msg); err != nil {
		return nil, err
	}
	select {
	case r := <-resp:
		return r, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}
// handleRequest dispatches messages arriving on the signaling data channel:
// internal track-negotiation requests are handled by receive(), replies are
// routed to the waiting Request() via pendingRequests, and everything else
// is surfaced through the OnRequest callback.
func (p *Peer) handleRequest(msg webrtc.DataChannelMessage) {
	mr := &request{}
	if err := json.Unmarshal(msg.Data, mr); err != nil {
		p.log.Error(err, "Error marshaling remote message", "peer_id", p.meta.PeerID, "session_id", p.meta.SessionID)
		return
	}
	// Internal track announcement from the remote side: set up the receiver
	// and acknowledge.
	if mr.Event == signalerRequestEvent && !mr.IsReply {
		p.mu.Lock()
		defer p.mu.Unlock()
		r := &signal{}
		if err := json.Unmarshal(mr.Payload, r); err != nil {
			p.log.Error(err, "Error marshaling remote message", "peer_id", p.meta.PeerID, "session_id", p.meta.SessionID)
			return
		}
		if err := p.receive(r); err != nil {
			p.log.Error(err, "Error receiving remote track", "peer_id", p.meta.PeerID, "session_id", p.meta.SessionID)
			return
		}
		if err := p.reply(mr.ID, mr.Event, nil); err != nil {
			p.log.Error(err, "Error replying message", "peer_id", p.meta.PeerID, "session_id", p.meta.SessionID)
			return
		}
		return
	}
	// A reply: wake the Request() call waiting on this ID, if still pending.
	if mr.IsReply {
		p.rmu.Lock()
		if c, ok := p.pendingRequests[mr.ID]; ok {
			c <- mr.Payload
			delete(p.pendingRequests, mr.ID)
		}
		p.rmu.Unlock()
		return
	}
	// User-level request: hand it to the OnRequest callback with a Message
	// that can Reply().
	if mr.Event != signalerRequestEvent {
		if f := p.onRequest.Load(); f != nil {
			f.(func(string, Message))(mr.Event, Message{
				p:     p,
				event: mr.Event,
				id:    mr.ID,
				msg:   mr.Payload,
			})
		}
		return
	}
}
// reply sends a response for request id over the signaling data channel,
// echoing the originating event name and marking the envelope as a reply.
func (p *Peer) reply(id uint64, event string, payload []byte) error {
	msg, err := json.Marshal(request{
		ID:      id,
		IsReply: true,
		Event:   event,
		Payload: payload,
	})
	if err != nil {
		return err
	}
	return p.signalingDC.Send(msg)
}
func joinErrs(errs ...error) error {
var joinErrsR func(string, int, ...error) error
joinErrsR = func(soFar string, count int, errs ...error) error {
if len(errs) == 0 {
if count == 0 {
return nil
}
return fmt.Errorf(soFar)
}
current := errs[0]
next := errs[1:]
if current == nil {
return joinErrsR(soFar, count, next...)
}
count++
if count == 1 {
return joinErrsR(fmt.Sprintf("%s", current), count, next...)
} else if count == 2 {
return joinErrsR(fmt.Sprintf("1: %s\n2: %s", soFar, current), count, next...)
}
return joinErrsR(fmt.Sprintf("%s\n%d: %s", soFar, count, current), count, next...)
}
return joinErrsR("", 0, errs...)
}
// Message is a request received from the remote peer, delivered via the
// OnRequest callback. It carries the payload and enough context to reply.
type Message struct {
	p     *Peer
	event string
	id    uint64
	msg   []byte
}

// Payload returns the raw request payload.
func (m *Message) Payload() []byte {
	return m.msg
}

// Reply sends msg back to the remote peer as the response to this request.
func (m *Message) Reply(msg []byte) error {
	return m.p.reply(m.id, m.event, msg)
}

205
pkg/sfu/relaypeer.go Normal file
View File

@@ -0,0 +1,205 @@
package sfu
import (
"fmt"
"io"
"sync"
"time"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/relay"
"github.com/pion/rtcp"
"github.com/pion/transport/packetio"
"github.com/pion/webrtc/v3"
)
// RelayPeer adapts a relay.Peer to the SFU: it routes the peer's incoming
// tracks into a session and can itself be relayed onward to further SFUs.
type RelayPeer struct {
	mu           sync.RWMutex // guards tracks, relayPeers, dataChannels
	peer         *relay.Peer
	session      Session
	router       Router
	config       *WebRTCTransportConfig
	tracks       []PublisherTrack
	relayPeers   []*relay.Peer // downstream peers created via Relay()
	dataChannels []*webrtc.DataChannel
}
// NewRelayPeer wraps a connected relay.Peer: it builds a router that writes
// RTCP back through the relay, publishes each incoming track to the session,
// and forwards client-relayed tracks to any downstream relay peers.
func NewRelayPeer(peer *relay.Peer, session Session, config *WebRTCTransportConfig) *RelayPeer {
	r := newRouter(peer.ID(), session, config)
	r.SetRTCPWriter(peer.WriteRTCP)
	rp := &RelayPeer{
		peer:    peer,
		router:  r,
		config:  config,
		session: session,
	}
	peer.OnTrack(func(track *webrtc.TrackRemote, receiver *webrtc.RTPReceiver, meta *relay.TrackMeta) {
		// pub is true only for the first layer of a track; only then is the
		// receiver published to the session and forwarded downstream.
		if recv, pub := r.AddReceiver(receiver, track, meta.TrackID, meta.StreamID); pub {
			recv.SetTrackMeta(meta.TrackID, meta.StreamID)
			session.Publish(r, recv)
			rp.mu.Lock()
			rp.tracks = append(rp.tracks, PublisherTrack{track, recv, true})
			for _, lrp := range rp.relayPeers {
				if err := rp.createRelayTrack(track, recv, lrp); err != nil {
					Logger.V(1).Error(err, "Creating relay track.", "peer_id", peer.ID())
				}
			}
			rp.mu.Unlock()
		} else {
			// Additional simulcast layer: record it but do not re-publish.
			rp.mu.Lock()
			rp.tracks = append(rp.tracks, PublisherTrack{track, recv, false})
			rp.mu.Unlock()
		}
	})
	return rp
}
// GetRouter returns the router that fans this peer's tracks out to subscribers.
func (r *RelayPeer) GetRouter() Router {
	return r.router
}

// ID returns the underlying relay peer's ID.
func (r *RelayPeer) ID() string {
	return r.peer.ID()
}
// Relay relays this peer onward to another SFU. It creates a fresh
// relay.Peer carrying the same peer/session identity, forwards all
// client-relayed tracks once the new peer is ready, mirrors its data
// channels into the session, and kicks off the Offer handshake via signalFn.
func (r *RelayPeer) Relay(signalFn func(meta relay.PeerMeta, signal []byte) ([]byte, error)) (*relay.Peer, error) {
	rp, err := relay.NewPeer(relay.PeerMeta{
		PeerID:    r.peer.ID(),
		SessionID: r.session.ID(),
	}, &relay.PeerConfig{
		SettingEngine: r.config.Setting,
		ICEServers:    r.config.Configuration.ICEServers,
		Logger:        Logger,
	})
	if err != nil {
		return nil, fmt.Errorf("relay: %w", err)
	}
	rp.OnReady(func() {
		r.mu.Lock()
		for _, tp := range r.tracks {
			if !tp.clientRelay {
				// simulcast will just relay client track for now
				continue
			}
			// Use a closure-local error: this callback fires asynchronously,
			// after Relay has returned, so it must not assign the enclosing
			// err variable (that was a data race).
			if terr := r.createRelayTrack(tp.Track, tp.Receiver, rp); terr != nil {
				Logger.V(1).Error(terr, "Creating relay track.", "peer_id", r.ID())
			}
		}
		r.relayPeers = append(r.relayPeers, rp)
		r.mu.Unlock()
		go r.relayReports(rp)
	})
	rp.OnDataChannel(func(channel *webrtc.DataChannel) {
		r.mu.Lock()
		r.dataChannels = append(r.dataChannels, channel)
		r.mu.Unlock()
		r.session.AddDatachannel("", channel)
	})
	if err = rp.Offer(signalFn); err != nil {
		return nil, fmt.Errorf("relay: %w", err)
	}
	return rp, nil
}
// DataChannel returns the relayed data channel with the given label, or nil
// if no such channel has been received.
func (r *RelayPeer) DataChannel(label string) *webrtc.DataChannel {
	r.mu.RLock()
	defer r.mu.RUnlock()
	for i := range r.dataChannels {
		if r.dataChannels[i].Label() == label {
			return r.dataChannels[i]
		}
	}
	return nil
}
// createRelayTrack forwards one received track to the downstream relay peer
// rp: it builds a DownTrack with NACK/PLI feedback, negotiates it with the
// remote SFU, and rewrites incoming PLIs from downstream back to the
// original track's SSRC so keyframe requests reach the publisher.
func (r *RelayPeer) createRelayTrack(track *webrtc.TrackRemote, receiver Receiver, rp *relay.Peer) error {
	codec := track.Codec()
	downTrack, err := NewDownTrack(webrtc.RTPCodecCapability{
		MimeType:     codec.MimeType,
		ClockRate:    codec.ClockRate,
		Channels:     codec.Channels,
		SDPFmtpLine:  codec.SDPFmtpLine,
		RTCPFeedback: []webrtc.RTCPFeedback{{"nack", ""}, {"nack", "pli"}},
	}, receiver, r.config.BufferFactory, r.ID(), r.config.Router.MaxPacketTrack)
	if err != nil {
		Logger.V(1).Error(err, "Create Relay downtrack err", "peer_id", r.ID())
		return err
	}
	sdr, err := rp.AddTrack(receiver.(*WebRTCReceiver).receiver, track, downTrack)
	if err != nil {
		Logger.V(1).Error(err, "Relaying track.", "peer_id", r.ID())
		return fmt.Errorf("relay: %w", err)
	}
	// Watch RTCP coming back from the downstream SFU on the sender's SSRC and
	// translate PLIs to the source track's SSRC.
	r.config.BufferFactory.GetOrNew(packetio.RTCPBufferPacket,
		uint32(sdr.GetParameters().Encodings[0].SSRC)).(*buffer.RTCPReader).OnPacket(func(bytes []byte) {
		pkts, err := rtcp.Unmarshal(bytes)
		if err != nil {
			Logger.V(1).Error(err, "Unmarshal rtcp reports", "peer_id", r.ID())
			return
		}
		var rpkts []rtcp.Packet
		for _, pkt := range pkts {
			switch pk := pkt.(type) {
			case *rtcp.PictureLossIndication:
				rpkts = append(rpkts, &rtcp.PictureLossIndication{
					SenderSSRC: pk.MediaSSRC,
					MediaSSRC:  uint32(track.SSRC()),
				})
			}
		}
		if len(rpkts) > 0 {
			if err := r.peer.WriteRTCP(rpkts); err != nil {
				Logger.V(1).Error(err, "Sending rtcp relay reports", "peer_id", r.ID())
			}
		}
	})
	// NOTE(review): this closure assigns the enclosing err when it fires
	// later; a closure-local variable would be safer — confirm upstream.
	downTrack.OnCloseHandler(func() {
		if err = sdr.Stop(); err != nil {
			Logger.V(1).Error(err, "Stopping relay sender.", "peer_id", r.ID())
		}
	})
	receiver.AddDownTrack(downTrack, true)
	return nil
}
// relayReports periodically (every 5s) sends RTCP sender reports for every
// bound relayed down track to the downstream peer rp. It returns when the
// peer's transport reports EOF/closed-pipe (i.e. the relay has shut down).
func (r *RelayPeer) relayReports(rp *relay.Peer) {
	// Use a ticker instead of time.Sleep in a loop; same 5s cadence, and the
	// timer is released when the goroutine exits.
	ticker := time.NewTicker(5 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		var packets []rtcp.Packet
		for _, t := range rp.LocalTracks() {
			if dt, ok := t.(*DownTrack); ok {
				// Only bound tracks have meaningful sender state.
				if !dt.bound.get() {
					continue
				}
				if sr := dt.CreateSenderReport(); sr != nil {
					packets = append(packets, sr)
				}
			}
		}
		if len(packets) == 0 {
			continue
		}
		if err := rp.WriteRTCP(packets); err != nil {
			if err == io.EOF || err == io.ErrClosedPipe {
				return
			}
			Logger.Error(err, "Sending downtrack reports err")
		}
	}
}

347
pkg/sfu/router.go Normal file
View File

@@ -0,0 +1,347 @@
package sfu
import (
"sync"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/stats"
"github.com/livekit/livekit-server/pkg/sfu/twcc"
)
// Router defines a track rtp/rtcp Router: it owns a publisher's receivers
// and attaches down tracks for subscribers.
type Router interface {
	// ID returns the owning peer's ID.
	ID() string
	// AddReceiver registers a remote track; the bool reports whether the
	// receiver is new and should be published to the session.
	AddReceiver(receiver *webrtc.RTPReceiver, track *webrtc.TrackRemote, trackID, streamID string) (Receiver, bool)
	// AddDownTracks subscribes s to one receiver (r non-nil) or to all.
	AddDownTracks(s *Subscriber, r Receiver) error
	// SetRTCPWriter installs the function used to send feedback to the peer.
	SetRTCPWriter(func([]rtcp.Packet) error)
	// AddDownTrack attaches a single down track for receiver r to s.
	AddDownTrack(s *Subscriber, r Receiver) (*DownTrack, error)
	// Stop shuts down the router's RTCP loop.
	Stop()
}
// RouterConfig defines Router configurations.
type RouterConfig struct {
	// WithStats enables per-stream stats collection and prometheus gauges.
	WithStats bool `mapstructure:"withstats"`
	// MaxBandwidth caps the REMB bitrate advertised per buffer.
	MaxBandwidth uint64 `mapstructure:"maxbandwidth"`
	// MaxPacketTrack sizes the down-track packet/sequence history.
	MaxPacketTrack int `mapstructure:"maxpackettrack"`
	// AudioLevelInterval/Threshold/Filter tune the active-speaker observer.
	AudioLevelInterval  int             `mapstructure:"audiolevelinterval"`
	AudioLevelThreshold uint8           `mapstructure:"audiolevelthreshold"`
	AudioLevelFilter    int             `mapstructure:"audiolevelfilter"`
	Simulcast           SimulcastConfig `mapstructure:"simulcast"`
	// PliThrottle is the minimum spacing between forwarded PLIs, in ms.
	PliThrottle int64 `mapstructure:"plithrottle"`
}
// router is the default Router implementation; the embedded RWMutex guards
// receivers and stats.
type router struct {
	sync.RWMutex
	id            string
	twcc          *twcc.Responder // one per peer, created on first video track
	stats         map[uint32]*stats.Stream
	rtcpCh        chan []rtcp.Packet // feedback funneled to writeRTCP
	stopCh        chan struct{}
	config        RouterConfig
	session       Session
	receivers     map[string]Receiver // keyed by track ID
	bufferFactory *buffer.Factory
	writeRTCP     func([]rtcp.Packet) error
}
// newRouter builds a router for routing rtp/rtcp packets of one peer.
func newRouter(id string, session Session, config *WebRTCTransportConfig) Router {
	r := &router{
		id:            id,
		rtcpCh:        make(chan []rtcp.Packet, 10),
		stopCh:        make(chan struct{}),
		config:        config.Router,
		session:       session,
		receivers:     make(map[string]Receiver),
		stats:         make(map[uint32]*stats.Stream),
		bufferFactory: config.BufferFactory,
	}
	// Track the number of live routers when stats are enabled.
	if config.Router.WithStats {
		stats.Peers.Inc()
	}
	return r
}
// ID returns the owning peer's ID.
func (r *router) ID() string {
	return r.id
}

// Stop terminates the RTCP send loop started by SetRTCPWriter.
// NOTE(review): this send blocks until sendRTCP receives it; if
// SetRTCPWriter was never called, Stop blocks forever — confirm callers.
func (r *router) Stop() {
	r.stopCh <- struct{}{}
	if r.config.WithStats {
		stats.Peers.Dec()
	}
}
// AddReceiver registers an incoming remote track. It wires the track's
// buffer into the router's feedback loop (RTCP, audio level, TWCC), creates
// a Receiver for the first layer of a track, and returns the receiver plus
// whether it is new and should be published to the session.
func (r *router) AddReceiver(receiver *webrtc.RTPReceiver, track *webrtc.TrackRemote, trackID, streamID string) (Receiver, bool) {
	r.Lock()
	defer r.Unlock()
	publish := false
	buff, rtcpReader := r.bufferFactory.GetBufferPair(uint32(track.SSRC()))
	// Forward buffer-generated feedback (NACK/REMB/...) toward the peer.
	buff.OnFeedback(func(fb []rtcp.Packet) {
		r.rtcpCh <- fb
	})
	if track.Kind() == webrtc.RTPCodecTypeAudio {
		// Feed audio levels into the session's active-speaker observer.
		buff.OnAudioLevel(func(level uint8) {
			r.session.AudioObserver().observe(streamID, level)
		})
		r.session.AudioObserver().addStream(streamID)
	} else if track.Kind() == webrtc.RTPCodecTypeVideo {
		// One TWCC responder per peer, keyed to the first video SSRC seen.
		if r.twcc == nil {
			r.twcc = twcc.NewTransportWideCCResponder(uint32(track.SSRC()))
			r.twcc.OnFeedback(func(p rtcp.RawPacket) {
				r.rtcpCh <- []rtcp.Packet{&p}
			})
		}
		buff.OnTransportWideCC(func(sn uint16, timeNS int64, marker bool) {
			r.twcc.Push(sn, timeNS, marker)
		})
	}
	if r.config.WithStats {
		r.stats[uint32(track.SSRC())] = stats.NewStream(buff)
	}
	// Handle sender RTCP: SDES carries the CNAME used to group streams for
	// drift stats; SRs feed A/V sync data into the buffer.
	rtcpReader.OnPacket(func(bytes []byte) {
		pkts, err := rtcp.Unmarshal(bytes)
		if err != nil {
			Logger.Error(err, "Unmarshal rtcp receiver packets err")
			return
		}
		for _, pkt := range pkts {
			switch pkt := pkt.(type) {
			case *rtcp.SourceDescription:
				if r.config.WithStats {
					for _, chunk := range pkt.Chunks {
						if s, ok := r.stats[chunk.Source]; ok {
							for _, item := range chunk.Items {
								if item.Type == rtcp.SDESCNAME {
									s.SetCName(item.Text)
								}
							}
						}
					}
				}
			case *rtcp.SenderReport:
				buff.SetSenderReportData(pkt.RTPTime, pkt.NTPTime)
				if r.config.WithStats {
					if st := r.stats[pkt.SSRC]; st != nil {
						r.updateStats(st)
					}
				}
			}
		}
	})
	recv, ok := r.receivers[trackID]
	if !ok {
		// First layer of this track: create the receiver, hook its teardown,
		// and mark it for publishing.
		recv = NewWebRTCReceiver(receiver, track, r.id, WithPliThrottle(r.config.PliThrottle*1e6))
		r.receivers[trackID] = recv
		recv.SetRTCPCh(r.rtcpCh)
		recv.OnCloseHandler(func() {
			if r.config.WithStats {
				if track.Kind() == webrtc.RTPCodecTypeVideo {
					stats.VideoTracks.Dec()
				} else {
					stats.AudioTracks.Dec()
				}
			}
			if recv.Kind() == webrtc.RTPCodecTypeAudio {
				r.session.AudioObserver().removeStream(track.StreamID())
			}
			r.deleteReceiver(trackID, uint32(track.SSRC()))
		})
		publish = true
	}
	// Attach this layer's up track and bind the buffer to the negotiated
	// RTP parameters.
	recv.AddUpTrack(track, buff, r.config.Simulcast.BestQualityFirst)
	buff.Bind(receiver.GetParameters(), buffer.Options{
		MaxBitRate: r.config.MaxBandwidth,
	})
	if r.config.WithStats {
		if track.Kind() == webrtc.RTPCodecTypeVideo {
			stats.VideoTracks.Inc()
		} else {
			stats.AudioTracks.Inc()
		}
	}
	return recv, publish
}
// AddDownTracks subscribes s to a single receiver when recv is non-nil,
// otherwise to every receiver the router currently holds, triggering a
// renegotiation whenever at least one down track was added. Subscribers
// with auto-subscription disabled are skipped.
func (r *router) AddDownTracks(s *Subscriber, recv Receiver) error {
	r.Lock()
	defer r.Unlock()
	if s.noAutoSubscribe {
		Logger.Info("peer turns off automatic subscription, skip tracks add")
		return nil
	}
	if recv != nil {
		_, err := r.AddDownTrack(s, recv)
		if err != nil {
			return err
		}
		s.negotiate()
		return nil
	}
	if len(r.receivers) == 0 {
		return nil
	}
	for _, rcv := range r.receivers {
		if _, err := r.AddDownTrack(s, rcv); err != nil {
			return err
		}
	}
	s.negotiate()
	return nil
}
// SetRTCPWriter installs the function used to deliver feedback to the peer
// and starts the background RTCP send loop (stopped via Stop).
func (r *router) SetRTCPWriter(fn func(packet []rtcp.Packet) error) {
	r.writeRTCP = fn
	go r.sendRTCP()
}
// AddDownTrack creates (or returns an existing) DownTrack carrying recv's
// media to sub: it registers the codec with the subscriber's media engine,
// adds a sendonly transceiver, and wires teardown/bind handlers.
func (r *router) AddDownTrack(sub *Subscriber, recv Receiver) (*DownTrack, error) {
	// Idempotency: if the subscriber already has a down track for this
	// stream/track pair, reuse it.
	for _, dt := range sub.GetDownTracks(recv.StreamID()) {
		if dt.ID() == recv.TrackID() {
			return dt, nil
		}
	}
	codec := recv.Codec()
	if err := sub.me.RegisterCodec(codec, recv.Kind()); err != nil {
		return nil, err
	}
	downTrack, err := NewDownTrack(webrtc.RTPCodecCapability{
		MimeType:     codec.MimeType,
		ClockRate:    codec.ClockRate,
		Channels:     codec.Channels,
		SDPFmtpLine:  codec.SDPFmtpLine,
		RTCPFeedback: []webrtc.RTCPFeedback{{"goog-remb", ""}, {"nack", ""}, {"nack", "pli"}},
	}, recv, r.bufferFactory, sub.id, r.config.MaxPacketTrack)
	if err != nil {
		return nil, err
	}
	// Create webrtc sender for the peer we are sending track to
	if downTrack.transceiver, err = sub.pc.AddTransceiverFromTrack(downTrack, webrtc.RTPTransceiverInit{
		Direction: webrtc.RTPTransceiverDirectionSendonly,
	}); err != nil {
		return nil, err
	}
	// nolint:scopelint
	downTrack.OnCloseHandler(func() {
		// Remove the sender from the subscriber's PC and renegotiate, unless
		// the connection is already closed/gone.
		if sub.pc.ConnectionState() != webrtc.PeerConnectionStateClosed {
			if err := sub.pc.RemoveTrack(downTrack.transceiver.Sender()); err != nil {
				if err == webrtc.ErrConnectionClosed {
					return
				}
				Logger.Error(err, "Error closing down track")
			} else {
				sub.RemoveDownTrack(recv.StreamID(), downTrack)
				sub.negotiate()
			}
		}
	})
	downTrack.OnBind(func() {
		// Push initial sender reports once the track is bound.
		go sub.sendStreamDownTracksReports(recv.StreamID())
	})
	sub.AddDownTrack(recv.StreamID(), downTrack)
	recv.AddDownTrack(downTrack, r.config.Simulcast.BestQualityFirst)
	return downTrack, nil
}
// deleteReceiver drops the receiver registered under track and its
// associated stats entry keyed by ssrc.
func (r *router) deleteReceiver(track string, ssrc uint32) {
	r.Lock()
	defer r.Unlock()
	delete(r.receivers, track)
	delete(r.stats, ssrc)
}
// sendRTCP drains the feedback channel and forwards batches to the peer via
// the installed writer; it exits when Stop signals stopCh.
func (r *router) sendRTCP() {
	for {
		select {
		case pkts := <-r.rtcpCh:
			if err := r.writeRTCP(pkts); err != nil {
				Logger.Error(err, "Write rtcp to peer err", "peer_id", r.id)
			}
		case <-r.stopCh:
			return
		}
	}
}
// updateStats recomputes inter-stream drift for every stream sharing this
// stream's CNAME. For each such stream it fast-forwards the last sender
// report's NTP time by the RTP timestamp progress since that report, takes
// the min/max across streams, and stores the difference as the drift.
func (r *router) updateStats(stream *stats.Stream) {
	calculateLatestMinMaxSenderNtpTime := func(cname string) (minPacketNtpTimeInMillisSinceSenderEpoch uint64, maxPacketNtpTimeInMillisSinceSenderEpoch uint64) {
		if len(cname) < 1 {
			return
		}
		r.RLock()
		defer r.RUnlock()
		for _, s := range r.stats {
			if s.GetCName() != cname {
				continue
			}
			clockRate := s.Buffer.GetClockRate()
			srrtp, srntp, _ := s.Buffer.GetSenderReportData()
			latestTimestamp, _ := s.Buffer.GetLatestTimestamp()
			// Project the SR's NTP time forward to the most recent packet:
			// RTP ticks since the SR, converted to milliseconds.
			fastForwardTimestampInClockRate := fastForwardTimestampAmount(latestTimestamp, srrtp)
			fastForwardTimestampInMillis := (fastForwardTimestampInClockRate * 1000) / clockRate
			latestPacketNtpTimeInMillisSinceSenderEpoch := ntpToMillisSinceEpoch(srntp) + uint64(fastForwardTimestampInMillis)
			// Zero means "unset"; otherwise track the running min/max.
			if 0 == minPacketNtpTimeInMillisSinceSenderEpoch || latestPacketNtpTimeInMillisSinceSenderEpoch < minPacketNtpTimeInMillisSinceSenderEpoch {
				minPacketNtpTimeInMillisSinceSenderEpoch = latestPacketNtpTimeInMillisSinceSenderEpoch
			}
			if 0 == maxPacketNtpTimeInMillisSinceSenderEpoch || latestPacketNtpTimeInMillisSinceSenderEpoch > maxPacketNtpTimeInMillisSinceSenderEpoch {
				maxPacketNtpTimeInMillisSinceSenderEpoch = latestPacketNtpTimeInMillisSinceSenderEpoch
			}
		}
		return minPacketNtpTimeInMillisSinceSenderEpoch, maxPacketNtpTimeInMillisSinceSenderEpoch
	}
	setDrift := func(cname string, driftInMillis uint64) {
		if len(cname) < 1 {
			return
		}
		r.RLock()
		defer r.RUnlock()
		for _, s := range r.stats {
			if s.GetCName() != cname {
				continue
			}
			s.SetDriftInMillis(driftInMillis)
		}
	}
	cname := stream.GetCName()
	minPacketNtpTimeInMillisSinceSenderEpoch, maxPacketNtpTimeInMillisSinceSenderEpoch := calculateLatestMinMaxSenderNtpTime(cname)
	driftInMillis := maxPacketNtpTimeInMillisSinceSenderEpoch - minPacketNtpTimeInMillisSinceSenderEpoch
	setDrift(cname, driftInMillis)
	stream.CalcStats()
}

161
pkg/sfu/sequencer.go Normal file
View File

@@ -0,0 +1,161 @@
package sfu
import (
"sync"
"time"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
)
const (
	// ignoreRetransmission is the window (milliseconds) during which repeated
	// NACKs for an already-retransmitted packet are ignored.
	ignoreRetransmission = 100 // Ignore packet retransmission after ignoreRetransmission milliseconds
)
// packetMeta records how a forwarded packet maps back to the publisher's
// stream so it can be retransmitted on NACK with the correct rewrites.
type packetMeta struct {
	// Original sequence number from stream.
	// The original sequence number is used to find the original
	// packet from publisher
	sourceSeqNo uint16
	// Modified sequence number after offset.
	// This sequence number is used for the associated
	// down track, is modified according the offsets, and
	// must not be shared
	targetSeqNo uint16
	// Modified timestamp for current associated
	// down track.
	timestamp uint32
	// The last time this packet was nack requested.
	// Sometimes clients request the same packet more than once, so keep
	// track of the requested packets helps to avoid writing multiple times
	// the same packet.
	// The resolution is 1 ms counting after the sequencer start time.
	lastNack uint32
	// Spatial layer of packet
	layer uint8
	// Information that differs depending the codec
	misc uint64
}
// setVP8PayloadMeta stores the VP8 TL0PICIDX and picture ID in misc:
// bits 16-23 hold tlz0Idx, bits 0-15 hold picID.
func (p *packetMeta) setVP8PayloadMeta(tlz0Idx uint8, picID uint16) {
	p.misc = (uint64(tlz0Idx) << 16) | uint64(picID)
}

// getVP8PayloadMeta is the inverse of setVP8PayloadMeta.
func (p *packetMeta) getVP8PayloadMeta() (uint8, uint16) {
	idx := uint8(p.misc >> 16)
	picID := uint16(p.misc & 0xFFFF)
	return idx, picID
}
// packVP8 compresses the VP8 header fields into misc. Bit layout (from the
// most significant bit): 56-63 FirstByte, 55 PictureIDPresent,
// 54 TL0PICIDXPresent, 53 TIDPresent, 52 KEYIDXPresent, 32-47 PictureID,
// 24-31 TL0PICIDX, 22-23 TID, 21 Y, 16-20 KEYIDX, 8-15 HeaderSize.
func (p *packetMeta) packVP8(vp8 *buffer.VP8) {
	p.misc = uint64(vp8.FirstByte)<<56 |
		uint64(vp8.PictureIDPresent)<<55 |
		uint64(vp8.TL0PICIDXPresent)<<54 |
		uint64(vp8.TIDPresent)<<53 |
		uint64(vp8.KEYIDXPresent)<<52 |
		uint64(vp8.PictureID)<<32 |
		uint64(vp8.TL0PICIDX)<<24 |
		uint64(vp8.TID)<<22 |
		uint64(vp8.Y)<<21 |
		uint64(vp8.KEYIDX)<<16 |
		uint64(vp8.HeaderSize)<<8
}

// unpackVP8 reconstructs a buffer.VP8 from the layout written by packVP8.
func (p *packetMeta) unpackVP8() *buffer.VP8 {
	return &buffer.VP8{
		FirstByte:        byte(p.misc >> 56),
		PictureIDPresent: int((p.misc >> 55) & 0x1),
		PictureID:        uint16((p.misc >> 32) & 0xFFFF),
		TL0PICIDXPresent: int((p.misc >> 54) & 0x1),
		TL0PICIDX:        uint8((p.misc >> 24) & 0xFF),
		TIDPresent:       int((p.misc >> 53) & 0x1),
		TID:              uint8((p.misc >> 22) & 0x3),
		Y:                uint8((p.misc >> 21) & 0x1),
		KEYIDXPresent:    int((p.misc >> 52) & 0x1),
		KEYIDX:           uint8((p.misc >> 16) & 0x1F),
		HeaderSize:       int((p.misc >> 8) & 0xFF),
	}
}
// Sequencer stores the packet sequence received by the down track in a
// fixed-size ring buffer keyed by down-track sequence number.
type sequencer struct {
	sync.Mutex
	init      bool // false until the first push seeds headSN
	max       int  // ring capacity
	seq       []packetMeta
	step      int    // next write position in the ring
	headSN    uint16 // highest down-track sequence number seen
	startTime int64  // sequencer epoch in unix milliseconds (for lastNack)
}

// newSequencer builds a sequencer able to remember maxTrack packets.
func newSequencer(maxTrack int) *sequencer {
	return &sequencer{
		startTime: time.Now().UnixNano() / 1e6,
		max:       maxTrack,
		seq:       make([]packetMeta, maxTrack),
	}
}
// push records the mapping sn (publisher sequence number) -> offSn
// (down-track sequence number) in the ring. head is true when offSn advances
// the highest sequence number seen; gaps between the old and new head burn
// ring slots so out-of-order arrivals keep their relative positions.
// Returns a pointer to the stored entry, or nil for packets too old to fit.
func (n *sequencer) push(sn, offSn uint16, timeStamp uint32, layer uint8, head bool) *packetMeta {
	n.Lock()
	defer n.Unlock()
	if !n.init {
		n.headSN = offSn
		n.init = true
	}
	step := 0
	if head {
		// Skip one slot per missing sequence number so late packets can land
		// in their reserved positions.
		inc := offSn - n.headSN
		for i := uint16(1); i < inc; i++ {
			n.step++
			if n.step >= n.max {
				n.step = 0
			}
		}
		step = n.step
		n.headSN = offSn
	} else {
		// Out-of-order packet: walk back from the current head position,
		// wrapping around the ring.
		step = n.step - int(n.headSN-offSn)
		if step < 0 {
			if step*-1 >= n.max {
				Logger.V(0).Info("Old packet received, can not be sequenced", "head", sn, "received", offSn)
				return nil
			}
			step = n.max + step
		}
	}
	// NOTE(review): the computed step is not used as the write index below —
	// the non-head branch stores at n.step instead of step, which looks like
	// it would misplace out-of-order packets; confirm against upstream.
	n.seq[n.step] = packetMeta{
		sourceSeqNo: sn,
		targetSeqNo: offSn,
		timestamp:   timeStamp,
		layer:       layer,
	}
	pm := &n.seq[n.step]
	n.step++
	if n.step >= n.max {
		n.step = 0
	}
	return pm
}
// getSeqNoPairs resolves a list of NACKed down-track sequence numbers to
// copies of the stored packet metadata for retransmission. Entries whose
// stored target sequence number no longer matches (slot overwritten) are
// skipped, as are entries NACKed within the last ignoreRetransmission
// millis; lastNack is refreshed on every returned entry.
func (n *sequencer) getSeqNoPairs(seqNo []uint16) []packetMeta {
	n.Lock()
	// 17 covers a full NACK pair (pid + 16-bit blp bitmask)
	meta := make([]packetMeta, 0, 17)
	refTime := uint32(time.Now().UnixNano()/1e6 - n.startTime)
	for _, sn := range seqNo {
		// walk back from the write cursor by the distance to the head
		step := n.step - int(n.headSN-sn) - 1
		if step < 0 {
			if step*-1 >= n.max {
				// older than the ring can hold
				continue
			}
			step = n.max + step
		}
		seq := &n.seq[step]
		if seq.targetSeqNo == sn {
			if seq.lastNack == 0 || refTime-seq.lastNack > ignoreRetransmission {
				seq.lastNack = refTime
				meta = append(meta, *seq)
			}
		}
	}
	n.Unlock()
	return meta
}

99
pkg/sfu/sequencer_test.go Normal file
View File

@@ -0,0 +1,99 @@
package sfu
import (
"reflect"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// Test_sequencer exercises the basic push/lookup cycle: sequential
// pushes, NACK lookup, retransmission-throttling, and VP8 payload-meta
// round-tripping.
func Test_sequencer(t *testing.T) {
	seq := newSequencer(500)
	off := uint16(15)
	// push 519 sequential head packets with a fixed target offset
	for i := uint16(1); i < 520; i++ {
		seq.push(i, i+off, 123, 2, true)
	}
	time.Sleep(60 * time.Millisecond)
	req := []uint16{57, 58, 62, 63, 513, 514, 515, 516, 517}
	res := seq.getSeqNoPairs(req)
	assert.Equal(t, len(req), len(res))
	for i, val := range res {
		assert.Equal(t, val.targetSeqNo, req[i])
		assert.Equal(t, val.sourceSeqNo, req[i]-off)
		assert.Equal(t, val.layer, uint8(2))
	}
	// immediate re-request must be suppressed by the NACK throttle
	res = seq.getSeqNoPairs(req)
	assert.Equal(t, 0, len(res))
	// after the throttle window expires the same NACKs resolve again
	time.Sleep(150 * time.Millisecond)
	res = seq.getSeqNoPairs(req)
	assert.Equal(t, len(req), len(res))
	for i, val := range res {
		assert.Equal(t, val.targetSeqNo, req[i])
		assert.Equal(t, val.sourceSeqNo, req[i]-off)
		assert.Equal(t, val.layer, uint8(2))
	}
	// VP8 payload metadata stored on a slot survives the lookup copy
	s := seq.push(521, 521+off, 123, 1, true)
	var (
		tlzIdx = uint8(15)
		picID  = uint16(16)
	)
	s.setVP8PayloadMeta(tlzIdx, picID)
	s.sourceSeqNo = 12
	m := seq.getSeqNoPairs([]uint16{521 + off})
	assert.Equal(t, 1, len(m))
	tlz0, pID := m[0].getVP8PayloadMeta()
	assert.Equal(t, tlzIdx, tlz0)
	assert.Equal(t, picID, pID)
}
// Test_sequencer_getNACKSeqNo verifies that only sequence numbers that
// were actually pushed resolve to metadata (5+5=10 was never pushed and
// must be skipped).
func Test_sequencer_getNACKSeqNo(t *testing.T) {
	type args struct {
		seqNo []uint16
	}
	type fields struct {
		input  []uint16
		offset uint16
	}
	tests := []struct {
		name   string
		fields fields
		args   args
		want   []uint16
	}{
		{
			name: "Should get correct seq numbers",
			fields: fields{
				input:  []uint16{2, 3, 4, 7, 8},
				offset: 5,
			},
			args: args{
				seqNo: []uint16{4 + 5, 5 + 5, 8 + 5},
			},
			want: []uint16{4, 8},
		},
	}
	for _, tt := range tests {
		tt := tt // capture range variable for the subtest closure
		t.Run(tt.name, func(t *testing.T) {
			n := newSequencer(500)
			for _, i := range tt.fields.input {
				n.push(i, i+tt.fields.offset, 123, 3, true)
			}
			g := n.getSeqNoPairs(tt.args.seqNo)
			var got []uint16
			for _, sn := range g {
				got = append(got, sn.sourceSeqNo)
			}
			if !reflect.DeepEqual(got, tt.want) {
				t.Errorf("getSeqNoPairs() = %v, want %v", got, tt.want)
			}
		})
	}
}

414
pkg/sfu/session.go Normal file
View File

@@ -0,0 +1,414 @@
package sfu
import (
"encoding/json"
"sync"
"time"
"github.com/pion/webrtc/v3"
"github.com/rs/zerolog/log"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/livekit/livekit-server/pkg/sfu/logger"
"github.com/livekit/livekit-server/pkg/sfu/relay"
)
// Session represents a set of peers. Transports inside a SessionLocal
// are automatically subscribed to each other.
type Session interface {
	// ID returns the session identifier.
	ID() string
	// Publish adds a Sender for the Receiver to every other peer.
	Publish(router Router, r Receiver)
	// Subscribe wires the peer to all existing publishers and fan-out
	// data channels.
	Subscribe(peer Peer)
	// AddPeer registers a peer with the session.
	AddPeer(peer Peer)
	// GetPeer looks up a peer by id; nil if absent.
	GetPeer(peerID string) Peer
	// RemovePeer removes the peer; the session closes when empty.
	RemovePeer(peer Peer)
	// AddRelayPeer answers a relay signal and tracks the relay peer.
	AddRelayPeer(peerID string, signalData []byte) ([]byte, error)
	// AudioObserver exposes the session audio-level observer.
	AudioObserver() *AudioObserver
	// AddDatachannel registers a fan-out data channel owned by a peer.
	AddDatachannel(owner string, dc *webrtc.DataChannel)
	// GetDCMiddlewares returns the configured datachannel middlewares.
	GetDCMiddlewares() []*Datachannel
	// GetFanOutDataChannelLabels returns labels of fan-out channels.
	GetFanOutDataChannelLabels() []string
	// GetDataChannels returns open channels for label, excluding peerID.
	GetDataChannels(peerID, label string) (dcs []*webrtc.DataChannel)
	// FanOutMessage relays a message to all channels except the origin.
	FanOutMessage(origin, label string, msg webrtc.DataChannelMessage)
	// Peers lists the current peers.
	Peers() []Peer
	// RelayPeers lists the current relay peers.
	RelayPeers() []*RelayPeer
}
// SessionLocal is the in-process implementation of Session.
type SessionLocal struct {
	id           string
	mu           sync.RWMutex // guards peers, relayPeers and fanOutDCs
	config       WebRTCTransportConfig
	peers        map[string]Peer
	relayPeers   map[string]*RelayPeer
	closed       atomicBool
	audioObs     *AudioObserver
	fanOutDCs    []string       // labels of data channels fanned out to all peers
	datachannels []*Datachannel // configured middlewares, shared by reference
	onCloseHandler func()
}
const (
	// AudioLevelsMethod is the data-channel API method name used when
	// broadcasting audio levels to peers.
	AudioLevelsMethod = "audioLevels"
)
// NewSession creates a new SessionLocal and starts its audio-level
// observer goroutine, which runs until the session is closed.
func NewSession(id string, dcs []*Datachannel, cfg WebRTCTransportConfig) Session {
	s := &SessionLocal{
		id:           id,
		peers:        make(map[string]Peer),
		relayPeers:   make(map[string]*RelayPeer),
		datachannels: dcs,
		config:       cfg,
		audioObs:     NewAudioObserver(cfg.Router.AudioLevelThreshold, cfg.Router.AudioLevelInterval, cfg.Router.AudioLevelFilter),
	}
	// periodic audio-level broadcast; exits when s.closed is set
	go s.audioLevelObserver(cfg.Router.AudioLevelInterval)
	return s
}
// ID return SessionLocal id
func (s *SessionLocal) ID() string {
	return s.id
}

// AudioObserver returns the session's audio-level observer.
func (s *SessionLocal) AudioObserver() *AudioObserver {
	return s.audioObs
}

// GetDCMiddlewares returns the datachannel middlewares configured at
// session creation.
func (s *SessionLocal) GetDCMiddlewares() []*Datachannel {
	return s.datachannels
}
// GetFanOutDataChannelLabels returns a snapshot copy of the fan-out
// data channel labels registered on this session.
func (s *SessionLocal) GetFanOutDataChannelLabels() []string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return append(make([]string, 0, len(s.fanOutDCs)), s.fanOutDCs...)
}
// AddPeer registers a peer with the session, replacing any peer with
// the same id.
func (s *SessionLocal) AddPeer(peer Peer) {
	s.mu.Lock()
	s.peers[peer.ID()] = peer
	s.mu.Unlock()
}

// GetPeer returns the peer with the given id, or nil if not present.
func (s *SessionLocal) GetPeer(peerID string) Peer {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.peers[peerID]
}
// AddRelayPeer creates a relay peer from the remote signal payload and
// returns the answer signal. The RelayPeer is only tracked once the
// underlying relay reports ready, and is removed again on close.
func (s *SessionLocal) AddRelayPeer(peerID string, signalData []byte) ([]byte, error) {
	p, err := relay.NewPeer(relay.PeerMeta{
		PeerID:    peerID,
		SessionID: s.id,
	}, &relay.PeerConfig{
		SettingEngine: s.config.Setting,
		ICEServers:    s.config.Configuration.ICEServers,
		Logger:        logger.New(),
	})
	if err != nil {
		log.Err(err).Msg("Creating relay peer")
		// surfaced as a gRPC status so relay callers get a typed error
		return nil, status.Error(codes.Internal, err.Error())
	}
	resp, err := p.Answer(signalData)
	if err != nil {
		log.Err(err).Msg("Creating answer for relay")
		return nil, err
	}
	p.OnReady(func() {
		rp := NewRelayPeer(p, s, &s.config)
		s.mu.Lock()
		s.relayPeers[peerID] = rp
		s.mu.Unlock()
	})
	p.OnClose(func() {
		s.mu.Lock()
		delete(s.relayPeers, peerID)
		s.mu.Unlock()
	})
	return resp, nil
}
// GetRelayPeer returns the relay peer with the given id, or nil if the
// relay has not become ready (see AddRelayPeer) or was closed.
func (s *SessionLocal) GetRelayPeer(peerID string) *RelayPeer {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.relayPeers[peerID]
}
// RemovePeer removes Peer from the SessionLocal. The identity check
// guards against removing a newer peer that reused the same id. The
// session is closed once the last peer leaves.
func (s *SessionLocal) RemovePeer(p Peer) {
	pid := p.ID()
	Logger.V(0).Info("RemovePeer from SessionLocal", "peer_id", pid, "session_id", s.id)
	s.mu.Lock()
	if s.peers[pid] == p {
		delete(s.peers, pid)
	}
	peerCount := len(s.peers)
	s.mu.Unlock()
	// Close SessionLocal if no peers
	if peerCount == 0 {
		s.Close()
	}
}
// AddDatachannel registers a peer-owned data channel as a fan-out
// channel: incoming messages on it are relayed to every other peer, and
// every other peer gets a matching channel created (triggering a
// renegotiation). Idempotent per label.
func (s *SessionLocal) AddDatachannel(owner string, dc *webrtc.DataChannel) {
	label := dc.Label()
	s.mu.Lock()
	// already fanned out under this label: nothing to do
	for _, lbl := range s.fanOutDCs {
		if label == lbl {
			s.mu.Unlock()
			return
		}
	}
	s.fanOutDCs = append(s.fanOutDCs, label)
	// NOTE(review): peerOwner is nil if owner is not in s.peers; the
	// Subscriber() call below would then panic — confirm callers
	// guarantee the owner has joined.
	peerOwner := s.peers[owner]
	s.mu.Unlock()
	peers := s.Peers()
	peerOwner.Subscriber().RegisterDatachannel(label, dc)
	dc.OnMessage(func(msg webrtc.DataChannelMessage) {
		s.FanOutMessage(owner, label, msg)
	})
	for _, p := range peers {
		peer := p
		if peer.ID() == owner || peer.Subscriber() == nil {
			continue
		}
		ndc, err := peer.Subscriber().AddDataChannel(label)
		if err != nil {
			Logger.Error(err, "error adding datachannel")
			continue
		}
		if peer.Publisher() != nil && peer.Publisher().Relayed() {
			peer.Publisher().AddRelayFanOutDataChannel(label)
		}
		pid := peer.ID()
		ndc.OnMessage(func(msg webrtc.DataChannelMessage) {
			s.FanOutMessage(pid, label, msg)
			// mirror the message onto any relayed data channels
			if peer.Publisher().Relayed() {
				for _, rdc := range peer.Publisher().GetRelayedDataChannels(label) {
					if msg.IsString {
						if err = rdc.SendText(string(msg.Data)); err != nil {
							Logger.Error(err, "Sending dc message err")
						}
					} else {
						if err = rdc.Send(msg.Data); err != nil {
							Logger.Error(err, "Sending dc message err")
						}
					}
				}
			}
		})
		// the new channel requires an SDP renegotiation with the peer
		peer.Subscriber().negotiate()
	}
}
// Publish will add a Sender to all peers in current SessionLocal from given
// Receiver. Peers without a subscriber transport and the publishing
// peer itself are skipped; per-peer errors are logged and do not stop
// the fan-out.
func (s *SessionLocal) Publish(router Router, r Receiver) {
	for _, p := range s.Peers() {
		// Don't sub to self
		if router.ID() == p.ID() || p.Subscriber() == nil {
			continue
		}
		Logger.V(0).Info("Publishing track to peer", "peer_id", p.ID())
		if err := router.AddDownTracks(p.Subscriber(), r); err != nil {
			Logger.Error(err, "Error subscribing transport to Router")
			continue
		}
	}
}
// Subscribe will create a Sender for every other Receiver in the SessionLocal:
// it joins the peer to every fan-out data channel, every other peer's
// published tracks, and every relay peer's tracks, then renegotiates.
func (s *SessionLocal) Subscribe(peer Peer) {
	s.mu.RLock()
	// snapshot fan-out labels and publishing peers under the read lock
	fdc := make([]string, len(s.fanOutDCs))
	copy(fdc, s.fanOutDCs)
	peers := make([]Peer, 0, len(s.peers))
	for _, p := range s.peers {
		if p == peer || p.Publisher() == nil {
			continue
		}
		peers = append(peers, p)
	}
	s.mu.RUnlock()
	// Subscribe to fan out data channels
	for _, label := range fdc {
		dc, err := peer.Subscriber().AddDataChannel(label)
		if err != nil {
			Logger.Error(err, "error adding datachannel")
			continue
		}
		l := label // capture per-iteration label for the closure
		dc.OnMessage(func(msg webrtc.DataChannelMessage) {
			s.FanOutMessage(peer.ID(), l, msg)
			// mirror onto relayed data channels when publishing via relay
			if peer.Publisher().Relayed() {
				for _, rdc := range peer.Publisher().GetRelayedDataChannels(l) {
					if msg.IsString {
						if err = rdc.SendText(string(msg.Data)); err != nil {
							Logger.Error(err, "Sending dc message err")
						}
					} else {
						if err = rdc.Send(msg.Data); err != nil {
							Logger.Error(err, "Sending dc message err")
						}
					}
				}
			}
		})
	}
	// Subscribe to publisher streams
	for _, p := range peers {
		err := p.Publisher().GetRouter().AddDownTracks(peer.Subscriber(), nil)
		if err != nil {
			Logger.Error(err, "Subscribing to Router err")
			continue
		}
	}
	// Subscribe to relay streams
	for _, p := range s.RelayPeers() {
		err := p.GetRouter().AddDownTracks(peer.Subscriber(), nil)
		if err != nil {
			Logger.Error(err, "Subscribing to Router err")
			continue
		}
	}
	peer.Subscriber().negotiate()
}
// Peers returns a snapshot of the peers currently in this SessionLocal.
func (s *SessionLocal) Peers() []Peer {
	s.mu.RLock()
	defer s.mu.RUnlock()
	out := make([]Peer, 0, len(s.peers))
	for _, pr := range s.peers {
		out = append(out, pr)
	}
	return out
}
// RelayPeers returns a snapshot of the relay peers in this SessionLocal.
func (s *SessionLocal) RelayPeers() []*RelayPeer {
	s.mu.RLock()
	defer s.mu.RUnlock()
	// fix: capacity was previously sized from len(s.peers) instead of
	// the map actually being copied
	p := make([]*RelayPeer, 0, len(s.relayPeers))
	for _, peer := range s.relayPeers {
		p = append(p, peer)
	}
	return p
}
// OnClose is called when the SessionLocal is closed; it registers the
// single close handler (not concurrency-safe, set before use).
func (s *SessionLocal) OnClose(f func()) {
	s.onCloseHandler = f
}

// Close marks the session closed exactly once and fires the close
// handler; subsequent calls are no-ops.
func (s *SessionLocal) Close() {
	// closed.set returns false if the flag was already set
	if !s.closed.set(true) {
		return
	}
	if s.onCloseHandler != nil {
		s.onCloseHandler()
	}
}
// FanOutMessage relays a data channel message to every open channel
// with the given label, except the originating peer's.
func (s *SessionLocal) FanOutMessage(origin, label string, msg webrtc.DataChannelMessage) {
	for _, dc := range s.GetDataChannels(origin, label) {
		var err error
		if msg.IsString {
			err = dc.SendText(string(msg.Data))
		} else {
			err = dc.Send(msg.Data)
		}
		if err != nil {
			Logger.Error(err, "Sending dc message err")
		}
	}
}
// GetDataChannels collects the data channels for label across all peers
// except peerID, plus any relay peers' channels. Peer channels are only
// included when open; relay channels are included unconditionally.
func (s *SessionLocal) GetDataChannels(peerID, label string) []*webrtc.DataChannel {
	s.mu.RLock()
	defer s.mu.RUnlock()
	dcs := make([]*webrtc.DataChannel, 0, len(s.peers))
	for pid, p := range s.peers {
		// skip the peer the message originated from
		if peerID == pid {
			continue
		}
		if p.Subscriber() != nil {
			if dc := p.Subscriber().DataChannel(label); dc != nil && dc.ReadyState() == webrtc.DataChannelStateOpen {
				dcs = append(dcs, dc)
			}
		}
	}
	for _, rp := range s.relayPeers {
		if dc := rp.DataChannel(label); dc != nil {
			dcs = append(dcs, dc)
		}
	}
	return dcs
}
// audioLevelObserver periodically broadcasts the observed audio levels
// to every peer over the API data channel. It runs until the session is
// closed. audioLevelInterval is in milliseconds; 0 selects the 1000ms
// default.
func (s *SessionLocal) audioLevelObserver(audioLevelInterval int) {
	// fix: apply the default before the low-interval warning so that a
	// zero (defaulted-to-1000ms) interval no longer logs spuriously
	if audioLevelInterval == 0 {
		audioLevelInterval = 1000
	}
	if audioLevelInterval <= 50 {
		Logger.V(0).Info("Values near/under 20ms may return unexpected values")
	}
	for {
		time.Sleep(time.Duration(audioLevelInterval) * time.Millisecond)
		if s.closed.get() {
			return
		}
		levels := s.audioObs.Calc()
		if levels == nil {
			// nothing to report this cycle
			continue
		}
		msg := ChannelAPIMessage{
			Method: AudioLevelsMethod,
			Params: levels,
		}
		l, err := json.Marshal(&msg)
		if err != nil {
			Logger.Error(err, "Marshaling audio levels err")
			continue
		}
		sl := string(l)
		// empty origin: broadcast to every peer's API channel
		dcs := s.GetDataChannels("", APIChannelLabel)
		for _, ch := range dcs {
			if err = ch.SendText(sl); err != nil {
				Logger.Error(err, "Sending audio levels err")
			}
		}
	}
}

278
pkg/sfu/sfu.go Normal file
View File

@@ -0,0 +1,278 @@
package sfu
import (
"math/rand"
"net"
"os"
"runtime"
"sync"
"time"
"github.com/go-logr/logr"
"github.com/pion/ice/v2"
"github.com/pion/turn/v2"
"github.com/pion/webrtc/v3"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/livekit/livekit-server/pkg/sfu/stats"
)
// Logger is an implementation of logr.Logger. If is not provided - will be turned off.
var Logger logr.Logger = logr.Discard()

// ICEServerConfig defines parameters for ice servers
type ICEServerConfig struct {
	URLs       []string `mapstructure:"urls"`
	Username   string   `mapstructure:"username"`
	Credential string   `mapstructure:"credential"`
}
// Candidates controls ICE candidate generation behavior.
type Candidates struct {
	IceLite    bool     `mapstructure:"icelite"` // run as ICE-lite agent
	NAT1To1IPs []string `mapstructure:"nat1to1"` // public IPs advertised for 1:1 NAT
}

// WebRTCTransportConfig represents Configuration options
type WebRTCTransportConfig struct {
	Configuration webrtc.Configuration
	Setting       webrtc.SettingEngine
	Router        RouterConfig
	BufferFactory *buffer.Factory
}
// WebRTCTimeoutsConfig holds ICE timeout values, in seconds.
type WebRTCTimeoutsConfig struct {
	ICEDisconnectedTimeout int `mapstructure:"disconnected"`
	ICEFailedTimeout       int `mapstructure:"failed"`
	ICEKeepaliveInterval   int `mapstructure:"keepalive"`
}

// WebRTCConfig defines parameters for ice
type WebRTCConfig struct {
	ICESinglePort int               `mapstructure:"singleport"`
	ICEPortRange  []uint16          `mapstructure:"portrange"`
	ICEServers    []ICEServerConfig `mapstructure:"iceserver"`
	Candidates    Candidates        `mapstructure:"candidates"`
	SDPSemantics  string            `mapstructure:"sdpsemantics"`
	MDNS          bool              `mapstructure:"mdns"`
	Timeouts      WebRTCTimeoutsConfig `mapstructure:"timeouts"`
}

// Config for base SFU
type Config struct {
	SFU struct {
		// Ballast is the size (MB) of a retained allocation used to
		// tune GC behavior
		Ballast   int64 `mapstructure:"ballast"`
		WithStats bool  `mapstructure:"withstats"`
	} `mapstructure:"sfu"`
	WebRTC        WebRTCConfig `mapstructure:"webrtc"`
	Router        RouterConfig `mapstructure:"Router"`
	Turn          TurnConfig   `mapstructure:"turn"`
	BufferFactory *buffer.Factory
	TurnAuth      func(username string, realm string, srcAddr net.Addr) ([]byte, bool)
}
var (
	// packetFactory pools 1460-byte scratch buffers for packet handling
	packetFactory *sync.Pool
)

// SFU represents an sfu instance
type SFU struct {
	sync.RWMutex // guards sessions
	webrtc       WebRTCTransportConfig
	turn         *turn.Server
	sessions     map[string]Session
	datachannels []*Datachannel
	withStats    bool
}
// NewWebRTCTransportConfig parses our settings and returns a usable WebRTCTransportConfig for creating PeerConnections.
// It panics if the single UDP port cannot be bound or the ephemeral
// port range is invalid.
func NewWebRTCTransportConfig(c Config) WebRTCTransportConfig {
	se := webrtc.SettingEngine{}
	se.DisableMediaEngineCopy(true)
	if c.WebRTC.ICESinglePort != 0 {
		// mux all ICE traffic over one UDP port
		Logger.Info("Listen on ", "single-port", c.WebRTC.ICESinglePort)
		udpListener, err := net.ListenUDP("udp", &net.UDPAddr{
			IP:   net.IP{0, 0, 0, 0},
			Port: c.WebRTC.ICESinglePort,
		})
		if err != nil {
			panic(err)
		}
		se.SetICEUDPMux(webrtc.NewICEUDPMux(nil, udpListener))
	} else {
		var icePortStart, icePortEnd uint16
		// when TURN is on without an explicit range, use the SFU defaults
		if c.Turn.Enabled && len(c.Turn.PortRange) == 0 {
			icePortStart = sfuMinPort
			icePortEnd = sfuMaxPort
		} else if len(c.WebRTC.ICEPortRange) == 2 {
			icePortStart = c.WebRTC.ICEPortRange[0]
			icePortEnd = c.WebRTC.ICEPortRange[1]
		}
		if icePortStart != 0 || icePortEnd != 0 {
			if err := se.SetEphemeralUDPPortRange(icePortStart, icePortEnd); err != nil {
				panic(err)
			}
		}
	}
	var iceServers []webrtc.ICEServer
	if c.WebRTC.Candidates.IceLite {
		// ICE-lite agents don't use external ICE servers
		se.SetLite(c.WebRTC.Candidates.IceLite)
	} else {
		for _, iceServer := range c.WebRTC.ICEServers {
			s := webrtc.ICEServer{
				URLs:       iceServer.URLs,
				Username:   iceServer.Username,
				Credential: iceServer.Credential,
			}
			iceServers = append(iceServers, s)
		}
	}
	se.BufferFactory = c.BufferFactory.GetOrNew
	sdpSemantics := webrtc.SDPSemanticsUnifiedPlan
	switch c.WebRTC.SDPSemantics {
	case "unified-plan-with-fallback":
		sdpSemantics = webrtc.SDPSemanticsUnifiedPlanWithFallback
	case "plan-b":
		sdpSemantics = webrtc.SDPSemanticsPlanB
	}
	if c.WebRTC.Timeouts.ICEDisconnectedTimeout == 0 &&
		c.WebRTC.Timeouts.ICEFailedTimeout == 0 &&
		c.WebRTC.Timeouts.ICEKeepaliveInterval == 0 {
		Logger.Info("No webrtc timeouts found in config, using default ones")
	} else {
		se.SetICETimeouts(
			time.Duration(c.WebRTC.Timeouts.ICEDisconnectedTimeout)*time.Second,
			time.Duration(c.WebRTC.Timeouts.ICEFailedTimeout)*time.Second,
			time.Duration(c.WebRTC.Timeouts.ICEKeepaliveInterval)*time.Second,
		)
	}
	w := WebRTCTransportConfig{
		Configuration: webrtc.Configuration{
			ICEServers:   iceServers,
			SDPSemantics: sdpSemantics,
		},
		Setting:       se,
		Router:        c.Router,
		BufferFactory: c.BufferFactory,
	}
	if len(c.WebRTC.Candidates.NAT1To1IPs) > 0 {
		w.Setting.SetNAT1To1IPs(c.WebRTC.Candidates.NAT1To1IPs, webrtc.ICECandidateTypeHost)
	}
	if !c.WebRTC.MDNS {
		w.Setting.SetICEMulticastDNSMode(ice.MulticastDNSModeDisabled)
	}
	if c.SFU.WithStats {
		w.Router.WithStats = true
		stats.InitStats()
	}
	return w
}
func init() {
	// Init packet factory: pool of 1460-byte buffers (typical UDP MTU
	// payload) reused across packet handling
	packetFactory = &sync.Pool{
		New: func() interface{} {
			b := make([]byte, 1460)
			return &b
		},
	}
}
// NewSFU creates a new sfu instance. It exits the process if the TURN
// server is enabled but fails to start.
func NewSFU(c Config) *SFU {
	// Init random seed
	rand.Seed(time.Now().UnixNano())
	// Init ballast: a retained allocation that raises the GC heap goal
	ballast := make([]byte, c.SFU.Ballast*1024*1024)
	if c.BufferFactory == nil {
		c.BufferFactory = buffer.NewBufferFactory(c.Router.MaxPacketTrack, Logger)
	}
	w := NewWebRTCTransportConfig(c)
	sfu := &SFU{
		webrtc:    w,
		sessions:  make(map[string]Session),
		withStats: c.Router.WithStats,
	}
	if c.Turn.Enabled {
		ts, err := InitTurnServer(c.Turn, c.TurnAuth)
		if err != nil {
			Logger.Error(err, "Could not init turn server err")
			os.Exit(1)
		}
		sfu.turn = ts
	}
	// keep the ballast from being collected before this point
	runtime.KeepAlive(ballast)
	return sfu
}
// NewSession creates a new SessionLocal instance, registers it in the
// session map and wires its close handler to deregister it (and update
// the session gauge when stats are enabled).
func (s *SFU) newSession(id string) Session {
	session := NewSession(id, s.datachannels, s.webrtc).(*SessionLocal)
	session.OnClose(func() {
		s.Lock()
		delete(s.sessions, id)
		s.Unlock()
		if s.withStats {
			stats.Sessions.Dec()
		}
	})
	s.Lock()
	s.sessions[id] = session
	s.Unlock()
	if s.withStats {
		stats.Sessions.Inc()
	}
	return session
}
// GetSession by id
func (s *SFU) getSession(id string) Session {
	s.RLock()
	defer s.RUnlock()
	return s.sessions[id]
}

// GetSession returns the session with the given id, creating it if it
// does not exist, along with the shared transport config.
// NOTE(review): lookup and create are not atomic; two concurrent calls
// for a new sid may race and create two sessions — verify callers
// serialize this.
func (s *SFU) GetSession(sid string) (Session, WebRTCTransportConfig) {
	session := s.getSession(sid)
	if session == nil {
		session = s.newSession(sid)
	}
	return session, s.webrtc
}
// NewDatachannel creates and registers a datachannel middleware with
// the given label for use by all future sessions.
func (s *SFU) NewDatachannel(label string) *Datachannel {
	dc := &Datachannel{Label: label}
	// fix: guard the append with the SFU mutex — newSession reads
	// s.datachannels concurrently
	s.Lock()
	s.datachannels = append(s.datachannels, dc)
	s.Unlock()
	return dc
}
// GetSessions return all sessions currently tracked by this SFU as a
// snapshot slice.
func (s *SFU) GetSessions() []Session {
	s.RLock()
	defer s.RUnlock()
	out := make([]Session, 0, len(s.sessions))
	for _, sess := range s.sessions {
		out = append(out, sess)
	}
	return out
}

360
pkg/sfu/sfu_test.go Normal file
View File

@@ -0,0 +1,360 @@
package sfu
import (
"sync"
"testing"
"time"
"github.com/lucsky/cuid"
"github.com/pion/webrtc/v3"
med "github.com/pion/webrtc/v3/pkg/media"
"github.com/stretchr/testify/assert"
"github.com/livekit/livekit-server/pkg/sfu/logger"
)
// Init test helpers

// signalPair performs a full offer/answer exchange between two peer
// connections, waiting for ICE gathering to complete on the offerer.
func signalPair(pcOffer *webrtc.PeerConnection, pcAnswer *webrtc.PeerConnection) error {
	offer, err := pcOffer.CreateOffer(nil)
	if err != nil {
		return err
	}
	gatherComplete := webrtc.GatheringCompletePromise(pcOffer)
	if err = pcOffer.SetLocalDescription(offer); err != nil {
		return err
	}
	// wait so the local description carries all candidates
	<-gatherComplete
	if err = pcAnswer.SetRemoteDescription(*pcOffer.LocalDescription()); err != nil {
		return err
	}
	answer, err := pcAnswer.CreateAnswer(nil)
	if err != nil {
		return err
	}
	if err = pcAnswer.SetLocalDescription(answer); err != nil {
		return err
	}
	return pcOffer.SetRemoteDescription(*pcAnswer.LocalDescription())
}
// sendRTPUntilDone writes a dummy sample to track every 20ms, starting
// when start closes and stopping when done closes.
func sendRTPUntilDone(start, done <-chan struct{}, t *testing.T, track *webrtc.TrackLocalStaticSample) {
	<-start
	for {
		select {
		case <-time.After(20 * time.Millisecond):
			assert.NoError(t, track.WriteSample(med.Sample{Data: []byte{0x0, 0xff, 0xff, 0xff, 0xff}, Duration: time.Second}))
		case <-done:
			return
		}
	}
}
// newPair creates two new peer connections (an offerer and an answerer) using
// the api. On error neither connection is returned and any
// already-created connection is closed.
func newPair(cfg webrtc.Configuration, api *webrtc.API) (pcOffer *webrtc.PeerConnection, pcAnswer *webrtc.PeerConnection, err error) {
	pca, err := api.NewPeerConnection(cfg)
	if err != nil {
		return nil, nil, err
	}
	pcb, err := api.NewPeerConnection(cfg)
	if err != nil {
		// fix: don't leak the first connection when the second fails
		_ = pca.Close()
		return nil, nil, err
	}
	return pca, pcb, nil
}
// media describes one test track: its kind ("audio"/"video"), stream id
// and track id.
type media struct {
	kind string
	id   string
	tid  string
}

// action is a single scripted step for one peer: join, publish or
// unpublish the listed media.
type action struct {
	id    string
	kind  string
	sleep time.Duration
	media []media
}

// peer bundles a local SFU peer with its remote publisher/subscriber
// peer connections and bookkeeping for expected subscriptions.
type peer struct {
	id        string
	mu        sync.Mutex // guards the signaling state of this peer
	local     *PeerLocal
	remotePub *webrtc.PeerConnection
	remoteSub *webrtc.PeerConnection
	subs      sync.WaitGroup // counts expected OnTrack callbacks
	pubs      []*sender
}

// step groups actions executed in the same scenario phase.
type step struct {
	actions []*action
}

// sender tracks a publishing transceiver and the channel that releases
// its RTP writer goroutine.
type sender struct {
	transceiver *webrtc.RTPTransceiver
	start       chan struct{}
}
// addMedia adds send-only transceivers for each requested media to pc
// and spawns an RTP writer per track that runs until done closes. The
// writers stay parked until each sender's start channel is closed.
func addMedia(done <-chan struct{}, t *testing.T, pc *webrtc.PeerConnection, media []media) []*sender {
	var senders []*sender
	for _, media := range media {
		var track *webrtc.TrackLocalStaticSample
		var err error
		start := make(chan struct{})
		switch media.kind {
		case "audio":
			track, err = webrtc.NewTrackLocalStaticSample(webrtc.RTPCodecCapability{MimeType: "audio/opus"}, media.tid, media.id)
			assert.NoError(t, err)
			transceiver, err := pc.AddTransceiverFromTrack(track, webrtc.RTPTransceiverInit{
				Direction: webrtc.RTPTransceiverDirectionSendonly,
			})
			assert.NoError(t, err)
			senders = append(senders, &sender{transceiver: transceiver, start: start})
		case "video":
			track, err = webrtc.NewTrackLocalStaticSample(webrtc.RTPCodecCapability{MimeType: "video/vp8"}, media.tid, media.id)
			assert.NoError(t, err)
			transceiver, err := pc.AddTransceiverFromTrack(track, webrtc.RTPTransceiverInit{
				Direction: webrtc.RTPTransceiverDirectionSendonly,
			})
			assert.NoError(t, err)
			senders = append(senders, &sender{transceiver: transceiver, start: start})
		}
		go sendRTPUntilDone(start, done, t, track)
	}
	return senders
}
// newTestConfig returns a minimal SFU config suitable for tests.
func newTestConfig() Config {
	return Config{
		Router: RouterConfig{MaxPacketTrack: 200},
	}
}
// TestSFU_SessionScenarios drives scripted join/publish/unpublish
// sequences through a real SFU and verifies every peer receives the
// expected OnTrack callbacks (tracked via per-peer WaitGroups).
func TestSFU_SessionScenarios(t *testing.T) {
	logger.SetGlobalOptions(logger.GlobalConfig{V: 2}) // 2 - TRACE
	Logger = logger.New()
	config := newTestConfig()
	sfu := NewSFU(config)
	sfu.NewDatachannel(APIChannelLabel)
	tests := []struct {
		name  string
		steps []step
	}{
		{
			name: "Multiple stream publish",
			steps: []step{
				{
					actions: []*action{{
						id:   "remote1",
						kind: "join",
					}, {
						id:   "remote2",
						kind: "join",
					}},
				},
				{
					actions: []*action{{
						id:   "remote1",
						kind: "publish",
						media: []media{
							{kind: "audio", id: "stream1", tid: "audio1"},
							{kind: "video", id: "stream1", tid: "video1"},
						},
					}},
				}, {
					actions: []*action{{
						id:   "remote2",
						kind: "publish",
						media: []media{
							{kind: "audio", id: "stream2", tid: "audio2"},
							{kind: "video", id: "stream2", tid: "video2"},
						},
					}},
				},
				{
					actions: []*action{{
						id:   "remote1",
						kind: "unpublish",
						media: []media{
							{kind: "audio", id: "stream3", tid: "audio3"},
							{kind: "video", id: "stream3", tid: "video3"},
						},
					}},
				},
				{
					actions: []*action{{
						id:   "remote2",
						kind: "unpublish",
						media: []media{
							{kind: "audio", id: "stream1", tid: "audio1"},
							{kind: "video", id: "stream1", tid: "video1"},
						},
					}},
				},
			},
		},
	}
	for _, tt := range tests {
		tt := tt // capture range variable
		t.Run(tt.name, func(t *testing.T) {
			testDone := atomicBool(0)
			var mu sync.RWMutex // guards the peers map
			done := make(chan struct{})
			peers := make(map[string]*peer)
			for _, step := range tt.steps {
				for _, action := range step.actions {
					func() {
						switch action.kind {
						case "join":
							// build a remote publisher+subscriber pair and a local SFU peer
							me, _ := getPublisherMediaEngine()
							se := webrtc.SettingEngine{}
							se.DisableMediaEngineCopy(true)
							err := me.RegisterDefaultCodecs()
							assert.NoError(t, err)
							api := webrtc.NewAPI(webrtc.WithMediaEngine(me), webrtc.WithSettingEngine(se))
							pub, err := api.NewPeerConnection(webrtc.Configuration{})
							assert.NoError(t, err)
							sub, err := api.NewPeerConnection(webrtc.Configuration{})
							assert.NoError(t, err)
							local := NewPeer(sfu)
							_, err = pub.CreateDataChannel("ion-sfu", nil)
							p := &peer{id: action.id, remotePub: pub, remoteSub: sub, local: local}
							sub.OnTrack(func(track *webrtc.TrackRemote, recv *webrtc.RTPReceiver) {
								mu.Lock()
								p.subs.Done()
								mu.Unlock()
							})
							mu.Lock()
							// expect one OnTrack per already-published track
							for id, existing := range peers {
								if id != action.id {
									p.subs.Add(len(existing.pubs))
								}
							}
							peers[action.id] = p
							mu.Unlock()
							p.mu.Lock()
							p.remotePub.OnNegotiationNeeded(func() {
								p.mu.Lock()
								defer p.mu.Unlock()
								o, err := p.remotePub.CreateOffer(nil)
								assert.NoError(t, err)
								err = p.remotePub.SetLocalDescription(o)
								assert.NoError(t, err)
								a, err := p.local.Answer(o)
								assert.NoError(t, err)
								err = p.remotePub.SetRemoteDescription(*a)
								assert.NoError(t, err)
								// release parked RTP writers once negotiated
								for _, pub := range p.pubs {
									if pub.start != nil {
										close(pub.start)
										pub.start = nil
									}
								}
							})
							p.local.OnIceCandidate = func(init *webrtc.ICECandidateInit, i int) {
								switch i {
								case subscriber:
									p.remoteSub.AddICECandidate(*init)
								case publisher:
									p.remotePub.AddICECandidate(*init)
								}
							}
							p.local.OnOffer = func(o *webrtc.SessionDescription) {
								if testDone.get() {
									return
								}
								p.mu.Lock()
								defer p.mu.Unlock()
								err := p.remoteSub.SetRemoteDescription(*o)
								assert.NoError(t, err)
								a, err := p.remoteSub.CreateAnswer(nil)
								assert.NoError(t, err)
								err = p.remoteSub.SetLocalDescription(a)
								assert.NoError(t, err)
								go func() {
									if testDone.get() {
										return
									}
									err = p.local.SetRemoteDescription(a)
									assert.NoError(t, err)
								}()
							}
							offer, err := p.remotePub.CreateOffer(nil)
							assert.NoError(t, err)
							gatherComplete := webrtc.GatheringCompletePromise(p.remotePub)
							err = p.remotePub.SetLocalDescription(offer)
							assert.NoError(t, err)
							<-gatherComplete
							err = p.local.Join("test sid", cuid.New())
							assert.NoError(t, err)
							answer, err := p.local.Answer(*p.remotePub.LocalDescription())
							err = p.remotePub.SetRemoteDescription(*answer)
							assert.NoError(t, err)
							p.mu.Unlock()
						case "publish":
							mu.Lock()
							peer := peers[action.id]
							peer.mu.Lock()
							// all other peers should get sub'd
							for id, p := range peers {
								if id != peer.id {
									p.subs.Add(len(action.media))
								}
							}
							peer.pubs = append(peer.pubs, addMedia(done, t, peer.remotePub, action.media)...)
							peer.mu.Unlock()
							mu.Unlock()
						case "unpublish":
							mu.Lock()
							peer := peers[action.id]
							peer.mu.Lock()
							for _, media := range action.media {
								for _, pub := range peer.pubs {
									if pub.transceiver != nil && pub.transceiver.Sender().Track().ID() == media.tid {
										peer.remotePub.RemoveTrack(pub.transceiver.Sender())
										pub.transceiver = nil
									}
								}
							}
							peer.mu.Unlock()
							mu.Unlock()
						}
					}()
					// let signaling/track propagation settle between actions
					time.Sleep(1 * time.Second)
				}
			}
			// wait until every peer saw all expected tracks
			for _, p := range peers {
				p.subs.Wait()
			}
			testDone.set(true)
			close(done)
			for _, p := range peers {
				p.mu.Lock()
				p.remotePub.Close()
				p.remoteSub.Close()
				p.local.Close()
				p.mu.Unlock()
			}
		})
	}
}

29
pkg/sfu/simulcast.go Normal file
View File

@@ -0,0 +1,29 @@
package sfu
import "time"
const (
	// rid values used for the three simulcast spatial layers
	quarterResolution = "q"
	halfResolution    = "h"
	fullResolution    = "f"
)

// SimulcastConfig controls simulcast layer-selection behavior.
type SimulcastConfig struct {
	BestQualityFirst    bool `mapstructure:"bestqualityfirst"`
	EnableTemporalLayer bool `mapstructure:"enabletemporallayer"`
}

// simulcastTrackHelpers carries per-track state used while switching
// simulcast layers.
type simulcastTrackHelpers struct {
	switchDelay       time.Time // earliest time the next layer switch is allowed
	temporalSupported bool
	temporalEnabled   bool
	lTSCalc           atomicInt64
	// VP8Helper temporal helpers
	pRefPicID  atomicUint16
	refPicID   atomicUint16
	lPicID     atomicUint16
	pRefTlZIdx atomicUint8
	refTlZIdx  atomicUint8
	lTlZIdx    atomicUint8
}

174
pkg/sfu/stats/stream.go Normal file
View File

@@ -0,0 +1,174 @@
package stats
import (
"math"
"sync"
"sync/atomic"
"github.com/livekit/livekit-server/pkg/sfu/buffer"
"github.com/prometheus/client_golang/prometheus"
)
var (
	// Prometheus collectors for RTP stream and SFU-level metrics.
	// Registration happens in InitStats.
	driftBuckets = []float64{5, 10, 20, 40, 80, 160, math.Inf(+1)}
	drift        = prometheus.NewHistogram(prometheus.HistogramOpts{
		Subsystem: "rtp",
		Name:      "drift_millis",
		Buckets:   driftBuckets,
	})
	expectedCount = prometheus.NewCounter(prometheus.CounterOpts{
		Subsystem: "rtp",
		Name:      "expected",
	})
	receivedCount = prometheus.NewCounter(prometheus.CounterOpts{
		Subsystem: "rtp",
		Name:      "received",
	})
	packetCount = prometheus.NewCounter(prometheus.CounterOpts{
		Subsystem: "rtp",
		Name:      "packets",
	})
	totalBytes = prometheus.NewCounter(prometheus.CounterOpts{
		Subsystem: "rtp",
		Name:      "bytes",
	})
	expectedMinusReceived = prometheus.NewSummary(prometheus.SummaryOpts{
		Subsystem: "rtp",
		Name:      "expected_minus_received",
	})
	lostRate = prometheus.NewSummary(prometheus.SummaryOpts{
		Subsystem: "rtp",
		Name:      "lost_rate",
	})
	jitter = prometheus.NewSummary(prometheus.SummaryOpts{
		Subsystem: "rtp",
		Name:      "jitter",
	})
	// Sessions tracks the current number of sessions.
	Sessions = prometheus.NewGauge(prometheus.GaugeOpts{
		Subsystem: "sfu",
		Name:      "sessions",
		Help:      "Current number of sessions",
	})
	// Peers tracks the current number of connected peers.
	Peers = prometheus.NewGauge(prometheus.GaugeOpts{
		Subsystem: "sfu",
		Name:      "peers",
		Help:      "Current number of peers connected",
	})
	// AudioTracks tracks the current number of audio tracks.
	AudioTracks = prometheus.NewGauge(prometheus.GaugeOpts{
		Subsystem: "sfu",
		Name:      "audio_tracks",
		Help:      "Current number of audio tracks",
	})
	// VideoTracks tracks the current number of video tracks.
	VideoTracks = prometheus.NewGauge(prometheus.GaugeOpts{
		Subsystem: "sfu",
		Name:      "video_tracks",
		Help:      "Current number of video tracks",
	})
)
// InitStats registers all package collectors with the default
// Prometheus registry. Call at most once; double registration panics.
func InitStats() {
	prometheus.MustRegister(drift)
	prometheus.MustRegister(expectedCount)
	prometheus.MustRegister(receivedCount)
	prometheus.MustRegister(packetCount)
	prometheus.MustRegister(totalBytes)
	prometheus.MustRegister(expectedMinusReceived)
	prometheus.MustRegister(lostRate)
	prometheus.MustRegister(jitter)
	prometheus.MustRegister(Sessions)
	// fix: Peers was declared but never registered, so the gauge was
	// silently missing from scrapes
	prometheus.MustRegister(Peers)
	prometheus.MustRegister(AudioTracks)
	prometheus.MustRegister(VideoTracks)
}
// Stream contains buffer statistics
type Stream struct {
	sync.RWMutex // guards cname and the stats fields below
	Buffer       *buffer.Buffer
	cname        string
	driftInMillis uint64 // accessed atomically; do not read without atomic
	hasStats     bool
	lastStats    buffer.Stats // previous snapshot, baseline for diffs
	diffStats    buffer.Stats // delta between the last two snapshots
}
// NewStream constructs a new Stream wrapping the given buffer.
func NewStream(buffer *buffer.Buffer) *Stream {
	return &Stream{Buffer: buffer}
}
// GetCName returns the cname for a given stream
func (s *Stream) GetCName() string {
	s.RLock()
	defer s.RUnlock()
	return s.cname
}

// SetCName records the RTCP canonical name for this stream.
func (s *Stream) SetCName(cname string) {
	s.Lock()
	defer s.Unlock()
	s.cname = cname
}
// SetDriftInMillis atomically records the current clock drift in ms.
func (s *Stream) SetDriftInMillis(driftInMillis uint64) {
	atomic.StoreUint64(&s.driftInMillis, driftInMillis)
}

// GetDriftInMillis atomically reads the last recorded drift in ms.
func (s *Stream) GetDriftInMillis() uint64 {
	return atomic.LoadUint64(&s.driftInMillis)
}
// UpdateStats stores a new buffer-stats snapshot and returns the delta
// against the previous one. hasDiff is false on the very first call,
// when there is no baseline yet.
func (s *Stream) UpdateStats(stats buffer.Stats) (hasDiff bool, diffStats buffer.Stats) {
	s.Lock()
	defer s.Unlock()
	hadStats := s.hasStats
	if hadStats {
		prev := s.lastStats
		s.diffStats.LastExpected = stats.LastExpected - prev.LastExpected
		s.diffStats.LastReceived = stats.LastReceived - prev.LastReceived
		s.diffStats.PacketCount = stats.PacketCount - prev.PacketCount
		s.diffStats.TotalByte = stats.TotalByte - prev.TotalByte
	}
	s.lastStats = stats
	s.hasStats = true
	return hadStats, s.diffStats
}
// CalcStats samples the buffer and publishes the metrics: cumulative
// counters get the delta since the previous call (skipped on the first
// call, when no baseline exists), summaries/histograms get the current
// values.
func (s *Stream) CalcStats() {
	bufferStats := s.Buffer.GetStats()
	driftInMillis := s.GetDriftInMillis()
	hadStats, diffStats := s.UpdateStats(bufferStats)
	drift.Observe(float64(driftInMillis))
	if hadStats {
		expectedCount.Add(float64(diffStats.LastExpected))
		receivedCount.Add(float64(diffStats.LastReceived))
		packetCount.Add(float64(diffStats.PacketCount))
		totalBytes.Add(float64(diffStats.TotalByte))
	}
	expectedMinusReceived.Observe(float64(bufferStats.LastExpected - bufferStats.LastReceived))
	lostRate.Observe(float64(bufferStats.LostRate))
	jitter.Observe(bufferStats.Jitter)
}

1051
pkg/sfu/streamallocator.go Normal file

File diff suppressed because it is too large Load Diff

147
pkg/sfu/streamtracker.go Normal file
View File

@@ -0,0 +1,147 @@
package sfu
import (
"sync/atomic"
"time"
)
// StreamStatus represents the liveness state of a tracked stream.
type StreamStatus int32

const (
	StreamStatusStopped StreamStatus = 0
	StreamStatusActive  StreamStatus = 1
)

// String returns a human-readable name for the status; unrecognized
// values map to "unknown".
func (s StreamStatus) String() string {
	if s == StreamStatusStopped {
		return "stopped"
	}
	if s == StreamStatusActive {
		return "active"
	}
	return "unknown"
}
// StreamTracker keeps track of packet flow and ensures a particular uptrack is consistently producing
// It runs its own goroutine for detection, and fires OnStatusChanged callback
type StreamTracker struct {
	// number of samples needed per cycle
	SamplesRequired uint32
	// number of cycles needed to be active
	CyclesRequired uint64
	CycleDuration  time.Duration
	// OnStatusChanged fires (from the detect goroutine) on every
	// active<->stopped transition
	OnStatusChanged func(StreamStatus)
	paused          atomicBool
	status          atomicInt32 // stores StreamStatus
	countSinceLast  uint32      // number of packets received since last check
	running         chan struct{}
	// only access within detectWorker
	cycleCount uint64
	// only access by the same goroutine as Observe
	lastSN uint16
}
// NewStreamTracker returns a tracker with default thresholds (5 samples
// per 500ms cycle, 60 consecutive cycles — 30s — to flip to active) in
// the active state.
func NewStreamTracker() *StreamTracker {
	tracker := &StreamTracker{
		SamplesRequired: 5,
		CyclesRequired:  60, // 30s of continuous stream
		CycleDuration:   500 * time.Millisecond,
	}
	tracker.status.set(int32(StreamStatusActive))
	return tracker
}
// Status returns the current stream status.
func (s *StreamTracker) Status() StreamStatus {
	return StreamStatus(s.status.get())
}

// setStatus stores the new status and fires OnStatusChanged, but only
// when the status actually changed.
func (s *StreamTracker) setStatus(status StreamStatus) {
	if status != s.Status() {
		s.status.set(int32(status))
		if s.OnStatusChanged != nil {
			s.OnStatusChanged(status)
		}
	}
}
// Start launches the detection goroutine; calling Start on an
// already-running tracker is a no-op.
func (s *StreamTracker) Start() {
	if s.isRunning() {
		return
	}
	s.running = make(chan struct{})
	go s.detectWorker()
}

// Stop signals the detection goroutine to exit. Safe to call when not
// started; not safe to call concurrently with Start (running is
// accessed without synchronization).
func (s *StreamTracker) Stop() {
	if s.running != nil {
		close(s.running)
		s.running = nil
	}
}
func (s *StreamTracker) SetPaused(paused bool) {
s.paused.set(paused)
}
func (s *StreamTracker) isRunning() bool {
if s.running == nil {
return false
}
select {
case <-s.running:
return false
default:
return true
}
}
// Observe records a received packet's sequence number. Packets are ignored
// while paused, as are out-of-order (older) sequence numbers. Must always
// be called from the same goroutine.
func (s *StreamTracker) Observe(sn uint16) {
	if s.paused.get() {
		return
	}
	diff := sn - s.lastSN
	if diff > uint16(1<<15) {
		// sequence number moved backwards modulo 2^16: out of order, skip
		return
	}
	s.lastSN = sn
	atomic.AddUint32(&s.countSinceLast, 1)
}
// detectWorker runs one detection cycle per CycleDuration until the
// tracker is stopped.
func (s *StreamTracker) detectWorker() {
	ticker := time.NewTicker(s.CycleDuration)
	// release the ticker's resources when the worker exits; the original
	// never called Stop, leaking the ticker for every tracker lifecycle
	defer ticker.Stop()
	for s.isRunning() {
		<-ticker.C
		if !s.isRunning() {
			return
		}
		s.detectChanges()
	}
}
// detectChanges performs one detection cycle: the stream flips to stopped
// as soon as a cycle sees fewer than SamplesRequired packets, and flips
// back to active only after CyclesRequired consecutive qualifying cycles.
func (s *StreamTracker) detectChanges() {
	if s.paused.get() {
		return
	}
	// atomically read-and-reset the counter so packets observed while this
	// cycle runs are not lost (a separate Load followed by Store(0) would
	// silently drop any packets counted in between)
	if atomic.SwapUint32(&s.countSinceLast, 0) >= s.SamplesRequired {
		s.cycleCount++
	} else {
		s.cycleCount = 0
	}
	if s.cycleCount == 0 && s.Status() == StreamStatusActive {
		// flip to stopped
		s.setStatus(StreamStatusStopped)
	} else if s.cycleCount >= s.CyclesRequired && s.Status() == StreamStatusStopped {
		// flip to active
		s.setStatus(StreamStatusActive)
	}
}

View File

@@ -0,0 +1,45 @@
package sfu
import (
"testing"
"github.com/stretchr/testify/require"
)
// TestStreamTracker exercises the state machine of StreamTracker by driving
// detectChanges directly (no background goroutine involved).
func TestStreamTracker(t *testing.T) {
	t.Run("flips to inactive immediately", func(t *testing.T) {
		callbackCalled := atomicBool(0)
		tracker := NewStreamTracker()
		tracker.OnStatusChanged = func(status StreamStatus) {
			callbackCalled.set(true)
		}
		require.Equal(t, StreamStatusActive, tracker.Status())
		// run a single iteration; with zero observed packets the tracker
		// must transition to stopped right away
		tracker.detectChanges()
		require.Equal(t, StreamStatusStopped, tracker.Status())
		require.True(t, callbackCalled.get())
	})
	t.Run("flips back to active after iterations", func(t *testing.T) {
		tracker := NewStreamTracker()
		tracker.CyclesRequired = 2
		tracker.SamplesRequired = 1
		tracker.setStatus(StreamStatusStopped)
		// first qualifying cycle: not enough consecutive cycles yet
		tracker.Observe(1)
		tracker.detectChanges()
		require.Equal(t, StreamStatusStopped, tracker.Status())
		// second qualifying cycle reaches CyclesRequired -> active
		tracker.Observe(2)
		tracker.detectChanges()
		require.Equal(t, StreamStatusActive, tracker.Status())
	})
	t.Run("does not change to inactive when paused", func(t *testing.T) {
		tracker := NewStreamTracker()
		tracker.SetPaused(true)
		// detection is a no-op while paused, so status must stay active
		tracker.detectChanges()
		require.Equal(t, StreamStatusActive, tracker.Status())
	})
}

322
pkg/sfu/subscriber.go Normal file
View File

@@ -0,0 +1,322 @@
package sfu
import (
"context"
"io"
"sync"
"time"
"github.com/bep/debounce"
"github.com/pion/rtcp"
"github.com/pion/webrtc/v3"
)
// APIChannelLabel is the data channel label reserved for the SFU API channel.
const APIChannelLabel = "ion-sfu"
// Subscriber is the downstream (server -> client) peer connection for a
// participant. It owns the DownTracks being forwarded to the client and
// any negotiated data channels.
type Subscriber struct {
	sync.RWMutex
	id string
	pc *webrtc.PeerConnection
	me *webrtc.MediaEngine
	// streamID -> down tracks being forwarded for that stream
	tracks map[string][]*DownTrack
	// label -> data channel
	channels map[string]*webrtc.DataChannel
	// remote ICE candidates queued until a remote description is set
	candidates []webrtc.ICECandidateInit
	// debounced renegotiation trigger installed via OnNegotiationNeeded
	negotiate func()
	closeOnce sync.Once
	noAutoSubscribe bool
}
// NewSubscriber creates a new Subscriber
// It builds a dedicated media engine and peer connection from cfg, wires
// ICE state handling (closing the transport on failed/closed), and starts
// the periodic downtrack report goroutine.
func NewSubscriber(id string, cfg WebRTCTransportConfig) (*Subscriber, error) {
	me, err := getSubscriberMediaEngine()
	if err != nil {
		Logger.Error(err, "NewPeer error")
		return nil, errPeerConnectionInitFailed
	}
	api := webrtc.NewAPI(webrtc.WithMediaEngine(me), webrtc.WithSettingEngine(cfg.Setting))
	pc, err := api.NewPeerConnection(cfg.Configuration)
	if err != nil {
		Logger.Error(err, "NewPeer error")
		return nil, errPeerConnectionInitFailed
	}
	s := &Subscriber{
		id: id,
		me: me,
		pc: pc,
		tracks: make(map[string][]*DownTrack),
		channels: make(map[string]*webrtc.DataChannel),
		noAutoSubscribe: false,
	}
	pc.OnICEConnectionStateChange(func(connectionState webrtc.ICEConnectionState) {
		Logger.V(1).Info("ice connection status", "state", connectionState)
		switch connectionState {
		case webrtc.ICEConnectionStateFailed:
			fallthrough
		case webrtc.ICEConnectionStateClosed:
			// tear down exactly once, whichever terminal state fires first
			s.closeOnce.Do(func() {
				Logger.V(1).Info("webrtc ice closed", "peer_id", s.id)
				if err := s.Close(); err != nil {
					Logger.Error(err, "webrtc transport close err")
				}
			})
		}
	})
	// periodic RTCP sender reports / SDES for all down tracks
	go s.downTracksReports()
	return s, nil
}
// AddDatachannel creates the data channel described by dc on the peer
// connection, wires its middleware chain to incoming messages, and
// registers it under its label.
func (s *Subscriber) AddDatachannel(peer Peer, dc *Datachannel) error {
	ndc, err := s.pc.CreateDataChannel(dc.Label, &webrtc.DataChannelInit{})
	if err != nil {
		return err
	}
	// build the middleware chain ending at the user's onMessage handler
	mws := newDCChain(dc.middlewares)
	p := mws.Process(ProcessFunc(func(ctx context.Context, args ProcessArgs) {
		if dc.onMessage != nil {
			dc.onMessage(ctx, args)
		}
	}))
	ndc.OnMessage(func(msg webrtc.DataChannelMessage) {
		p.Process(context.Background(), ProcessArgs{
			Peer:        peer,
			Message:     msg,
			DataChannel: ndc,
		})
	})
	// guard the channels map: every other accessor takes the lock, so an
	// unguarded write here would race with DataChannel()/AddDataChannel()
	s.Lock()
	s.channels[dc.Label] = ndc
	s.Unlock()
	return nil
}
// DataChannel returns the channel for a label
func (s *Subscriber) DataChannel(label string) *webrtc.DataChannel {
	s.RLock()
	defer s.RUnlock()
	return s.channels[label]
}
// OnNegotiationNeeded installs f as the renegotiation trigger, debounced
// to at most one invocation per 250ms burst of Negotiate() calls.
func (s *Subscriber) OnNegotiationNeeded(f func()) {
	debounced := debounce.New(250 * time.Millisecond)
	s.negotiate = func() {
		debounced(f)
	}
}
// CreateOffer builds an SDP offer and installs it as the local description.
func (s *Subscriber) CreateOffer() (webrtc.SessionDescription, error) {
	offer, err := s.pc.CreateOffer(nil)
	if err == nil {
		err = s.pc.SetLocalDescription(offer)
	}
	if err != nil {
		return webrtc.SessionDescription{}, err
	}
	return offer, nil
}
// OnICECandidate handler
func (s *Subscriber) OnICECandidate(f func(c *webrtc.ICECandidate)) {
	s.pc.OnICECandidate(f)
}

// AddICECandidate adds the candidate to the peer connection, or queues it
// until a remote description has been applied.
func (s *Subscriber) AddICECandidate(candidate webrtc.ICECandidateInit) error {
	if s.pc.RemoteDescription() != nil {
		return s.pc.AddICECandidate(candidate)
	}
	// protect the pending-candidate queue: signaling callbacks can deliver
	// candidates on a different goroutine than SetRemoteDescription, and
	// the unguarded append raced with that drain
	s.Lock()
	s.candidates = append(s.candidates, candidate)
	s.Unlock()
	return nil
}
// AddDownTrack registers a DownTrack under the given stream ID.
func (s *Subscriber) AddDownTrack(streamID string, downTrack *DownTrack) {
	s.Lock()
	defer s.Unlock()
	// append works for both a missing key (nil slice) and an existing list
	s.tracks[streamID] = append(s.tracks[streamID], downTrack)
}

// RemoveDownTrack removes the given DownTrack from the stream's list.
// The order of the remaining tracks is not preserved (swap-remove).
func (s *Subscriber) RemoveDownTrack(streamID string, downTrack *DownTrack) {
	s.Lock()
	defer s.Unlock()
	dts, ok := s.tracks[streamID]
	if !ok {
		return
	}
	for i, dt := range dts {
		if dt != downTrack {
			continue
		}
		// move the last entry into this slot, clear the tail, and shrink
		last := len(dts) - 1
		dts[i] = dts[last]
		dts[last] = nil
		s.tracks[streamID] = dts[:last]
		return
	}
}
// AddDataChannel creates a data channel with the given label, returning
// the existing one when a channel with that label was already registered.
func (s *Subscriber) AddDataChannel(label string) (*webrtc.DataChannel, error) {
	s.Lock()
	defer s.Unlock()
	if existing := s.channels[label]; existing != nil {
		return existing, nil
	}
	created, err := s.pc.CreateDataChannel(label, &webrtc.DataChannelInit{})
	if err != nil {
		Logger.Error(err, "dc creation error")
		return nil, errCreatingDataChannel
	}
	s.channels[label] = created
	return created, nil
}
// SetRemoteDescription sets the SessionDescription of the remote peer
// and drains any ICE candidates that were queued before it arrived.
// NOTE(review): s.candidates is read and cleared here without the lock
// while AddICECandidate appends to it — confirm signaling is serialized.
func (s *Subscriber) SetRemoteDescription(desc webrtc.SessionDescription) error {
	if err := s.pc.SetRemoteDescription(desc); err != nil {
		Logger.Error(err, "SetRemoteDescription error")
		return err
	}
	for _, c := range s.candidates {
		if err := s.pc.AddICECandidate(c); err != nil {
			Logger.Error(err, "Add subscriber ice candidate to peer err", "peer_id", s.id)
		}
	}
	s.candidates = nil
	return nil
}
// RegisterDatachannel records an externally created data channel under label.
func (s *Subscriber) RegisterDatachannel(label string, dc *webrtc.DataChannel) {
	s.Lock()
	s.channels[label] = dc
	s.Unlock()
}
// GetDatachannel returns the registered data channel for label (alias of DataChannel).
func (s *Subscriber) GetDatachannel(label string) *webrtc.DataChannel {
	return s.DataChannel(label)
}
// DownTracks returns every DownTrack registered on this subscriber,
// across all streams.
func (s *Subscriber) DownTracks() []*DownTrack {
	s.RLock()
	defer s.RUnlock()
	var all []*DownTrack
	for _, dts := range s.tracks {
		all = append(all, dts...)
	}
	return all
}

// GetDownTracks returns the DownTracks registered for a single stream ID.
func (s *Subscriber) GetDownTracks(streamID string) []*DownTrack {
	s.RLock()
	defer s.RUnlock()
	return s.tracks[streamID]
}

// Negotiate fires a debounced negotiation request.
func (s *Subscriber) Negotiate() {
	s.negotiate()
}

// Close shuts down the underlying peer connection.
func (s *Subscriber) Close() error {
	return s.pc.Close()
}
// downTracksReports periodically (every 5s) collects RTCP sender reports
// and source descriptions for all bound down tracks and writes them to the
// peer connection, until it is closed.
func (s *Subscriber) downTracksReports() {
	for {
		time.Sleep(5 * time.Second)
		if s.pc.ConnectionState() == webrtc.PeerConnectionStateClosed {
			return
		}
		var r []rtcp.Packet
		var sd []rtcp.SourceDescriptionChunk
		s.RLock()
		for _, dts := range s.tracks {
			for _, dt := range dts {
				if !dt.bound.get() {
					continue
				}
				if sr := dt.CreateSenderReport(); sr != nil {
					r = append(r, sr)
				}
				sd = append(sd, dt.CreateSourceDescriptionChunks()...)
			}
		}
		s.RUnlock()
		// With no SDES chunks the batching loop below never runs; still
		// flush any collected sender reports (previously they were
		// silently dropped in that case).
		if len(sd) == 0 {
			if len(r) > 0 {
				if err := s.pc.WriteRTCP(r); err != nil {
					if err == io.EOF || err == io.ErrClosedPipe {
						return
					}
					Logger.Error(err, "Sending downtrack reports err")
				}
			}
			continue
		}
		// send SDES chunks in batches of 15 per compound packet; the first
		// batch also carries the sender reports
		i := 0
		j := 0
		for i < len(sd) {
			i = (j + 1) * 15
			if i >= len(sd) {
				i = len(sd)
			}
			nsd := sd[j*15 : i]
			r = append(r, &rtcp.SourceDescription{Chunks: nsd})
			j++
			if err := s.pc.WriteRTCP(r); err != nil {
				if err == io.EOF || err == io.ErrClosedPipe {
					return
				}
				Logger.Error(err, "Sending downtrack reports err")
			}
			r = r[:0]
		}
	}
}
// sendStreamDownTracksReports sends a source-description RTCP packet for
// the bound down tracks of one stream, repeating it several times so
// receivers reliably pick up the track binding.
func (s *Subscriber) sendStreamDownTracksReports(streamID string) {
	var sd []rtcp.SourceDescriptionChunk
	s.RLock()
	for _, dt := range s.tracks[streamID] {
		if dt.bound.get() {
			sd = append(sd, dt.CreateSourceDescriptionChunks()...)
		}
	}
	s.RUnlock()
	if len(sd) == 0 {
		return
	}
	var r []rtcp.Packet
	r = append(r, &rtcp.SourceDescription{Chunks: sd})
	go func() {
		// write the same report 7 times, pausing 20ms between sends
		for attempt := 0; ; attempt++ {
			if err := s.pc.WriteRTCP(r); err != nil {
				Logger.Error(err, "Sending track binding reports err")
			}
			if attempt >= 6 {
				return
			}
			time.Sleep(20 * time.Millisecond)
		}
	}()
}

170
pkg/sfu/turn.go Normal file
View File

@@ -0,0 +1,170 @@
package sfu
import (
"context"
"crypto/rand"
"crypto/tls"
"fmt"
"net"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/pion/dtls/v2"
"github.com/pion/logging"
"github.com/pion/turn/v2"
)
// Port split between the embedded TURN relay and the SFU's own media ports,
// so the two never contend for the same UDP ports.
const (
	turnMinPort = 32768
	turnMaxPort = 46883
	sfuMinPort = 46884
	sfuMaxPort = 60999
)
// TurnAuth holds TURN authentication settings: either static
// username=password pairs (Credentials) or a long-term-credential shared
// secret (Secret).
type TurnAuth struct {
	Credentials string `mapstructure:"credentials"`
	Secret string `mapstructure:"secret"`
}
// WebRTCConfig defines parameters for ice
type TurnConfig struct {
	Enabled bool `mapstructure:"enabled"`
	Realm string `mapstructure:"realm"`
	Address string `mapstructure:"address"`
	// TLS/DTLS listeners are only created when both Cert and Key are set
	Cert string `mapstructure:"cert"`
	Key string `mapstructure:"key"`
	Auth TurnAuth `mapstructure:"auth"`
	// optional [min, max] relay port override (defaults to turnMinPort/turnMaxPort)
	PortRange []uint16 `mapstructure:"portrange"`
}
// InitTurnServer starts an embedded TURN server on conf.Address, listening
// over UDP and TCP (plus TLS and DTLS when a cert/key pair is configured).
// If auth is nil, a handler is derived from the config: long-term
// credentials when a shared secret is set, otherwise a static
// username/password map parsed from conf.Auth.Credentials.
func InitTurnServer(conf TurnConfig, auth func(username, realm string, srcAddr net.Addr) ([]byte, bool)) (*turn.Server, error) {
	var listeners []turn.ListenerConfig
	// Create a UDP listener to pass into pion/turn
	// pion/turn itself doesn't allocate any UDP sockets, but lets the user pass them in
	// this allows us to add logging, storage or modify inbound/outbound traffic
	udpListener, err := net.ListenPacket("udp4", conf.Address)
	if err != nil {
		return nil, err
	}
	// Create a TCP listener to pass into pion/turn
	// pion/turn itself doesn't allocate any TCP listeners, but lets the user pass them in
	// this allows us to add logging, storage or modify inbound/outbound traffic
	tcpListener, err := net.Listen("tcp4", conf.Address)
	if err != nil {
		return nil, err
	}
	// NOTE(review): assumes conf.Address is "host:port"; addr[1] below would
	// panic on an address without a colon — confirm config validation upstream.
	addr := strings.Split(conf.Address, ":")
	var minPort uint16 = turnMinPort
	var maxPort uint16 = turnMaxPort
	if len(conf.PortRange) == 2 {
		minPort = conf.PortRange[0]
		maxPort = conf.PortRange[1]
	}
	if len(conf.Cert) > 0 && len(conf.Key) > 0 {
		// Create a TLS listener to pass into pion/turn
		cert, err := tls.LoadX509KeyPair(conf.Cert, conf.Key)
		if err != nil {
			return nil, err
		}
		config := tls.Config{Certificates: []tls.Certificate{cert}}
		config.Rand = rand.Reader
		tlsListener := tls.NewListener(tcpListener, &config)
		listeners = append(listeners, turn.ListenerConfig{
			Listener: tlsListener,
			RelayAddressGenerator: &turn.RelayAddressGeneratorPortRange{
				RelayAddress: net.ParseIP(addr[0]),
				Address: "0.0.0.0",
				MinPort: minPort,
				MaxPort: maxPort,
			},
		})
		// Create a DTLS listener to pass into pion/turn
		ctx := context.Background()
		dtlsConf := &dtls.Config{
			Certificates: []tls.Certificate{cert},
			ExtendedMasterSecret: dtls.RequireExtendedMasterSecret,
			// Create timeout context for accepted connection.
			ConnectContextMaker: func() (context.Context, func()) {
				return context.WithTimeout(ctx, 30*time.Second)
			},
		}
		port, err := strconv.ParseInt(addr[1], 10, 64)
		if err != nil {
			return nil, err
		}
		a := &net.UDPAddr{IP: net.ParseIP(addr[0]), Port: int(port)}
		dtlsListener, err := dtls.Listen("udp4", a, dtlsConf)
		if err != nil {
			return nil, err
		}
		listeners = append(listeners, turn.ListenerConfig{
			Listener: dtlsListener,
			RelayAddressGenerator: &turn.RelayAddressGeneratorPortRange{
				RelayAddress: net.ParseIP(addr[0]),
				Address: "0.0.0.0",
				MinPort: minPort,
				MaxPort: maxPort,
			},
		})
	}
	if auth == nil {
		if conf.Auth.Secret != "" {
			// long-term credential mechanism derived from a shared secret
			logger := logging.NewDefaultLeveledLoggerForScope("lt-creds", logging.LogLevelTrace, os.Stdout)
			auth = turn.NewLongTermAuthHandler(conf.Auth.Secret, logger)
		} else {
			// static credentials: parse "user=pass" pairs from the config
			usersMap := map[string][]byte{}
			for _, kv := range regexp.MustCompile(`(\w+)=(\w+)`).FindAllStringSubmatch(conf.Auth.Credentials, -1) {
				usersMap[kv[1]] = turn.GenerateAuthKey(kv[1], conf.Realm, kv[2])
			}
			if len(usersMap) == 0 {
				Logger.Error(fmt.Errorf("No turn auth provided"), "Got err")
			}
			auth = func(username string, realm string, srcAddr net.Addr) ([]byte, bool) {
				if key, ok := usersMap[username]; ok {
					return key, true
				}
				return nil, false
			}
		}
	}
	return turn.NewServer(turn.ServerConfig{
		Realm: conf.Realm,
		// Set AuthHandler callback
		// This is called every time a user tries to authenticate with the TURN server
		// Return the key for that user, or false when no user is found
		AuthHandler: auth,
		// ListenerConfig is a list of Listeners and the Configuration around them
		ListenerConfigs: append(listeners, turn.ListenerConfig{
			Listener: tcpListener,
			RelayAddressGenerator: &turn.RelayAddressGeneratorPortRange{
				RelayAddress: net.ParseIP(addr[0]),
				Address: "0.0.0.0",
				MinPort: minPort,
				MaxPort: maxPort,
			},
		},
		),
		// PacketConnConfigs is a list of UDP Listeners and the Configuration around them
		PacketConnConfigs: []turn.PacketConnConfig{
			{
				PacketConn: udpListener,
				RelayAddressGenerator: &turn.RelayAddressGeneratorPortRange{
					RelayAddress: net.ParseIP(addr[0]),
					Address: "0.0.0.0",
					MinPort: minPort,
					MaxPort: maxPort,
				},
			},
		},
	})
}

313
pkg/sfu/twcc/twcc.go Normal file
View File

@@ -0,0 +1,313 @@
package twcc
import (
"encoding/binary"
"math"
"math/rand"
"sort"
"sync"
"github.com/gammazero/deque"
"github.com/pion/rtcp"
)
const (
	// byte offsets into the TWCC feedback payload header
	baseSequenceNumberOffset = 8
	packetStatusCountOffset = 10
	referenceTimeOffset = 12
	// feedback pacing: send a report every 100ms, or 50ms after a marker packet
	tccReportDelta = 1e8
	tccReportDeltaAfterMark = 50e6
)
// rtpExtInfo pairs an extended (cycle-corrected, 32-bit) transport-wide
// sequence number with the packet's arrival timestamp in microseconds.
type rtpExtInfo struct {
	ExtTSN uint32
	Timestamp int64
}
// Responder will get the transport wide sequence number from rtp
// extension header, and reply with the rtcp feedback message
// according to:
// https://tools.ietf.org/html/draft-holmer-rmcat-transport-wide-cc-extensions-01
type Responder struct {
	sync.Mutex
	// packets recorded since the last report was built
	extInfo []rtpExtInfo
	// time (ns) the last feedback packet was sent
	lastReport int64
	// accumulated 16-bit sequence number wrap-arounds (shifted into bits 16..31)
	cycles uint32
	lastExtSN uint32
	// feedback packet count, echoed in the report header
	pktCtn uint8
	lastSn uint16
	lastExtInfo uint16
	// media source SSRC / feedback sender SSRC
	mSSRC uint32
	sSSRC uint32
	// bytes used so far in payload / deltas scratch buffers
	len uint16
	deltaLen uint16
	payload [100]byte
	deltas [200]byte
	// status symbol chunk under construction
	chunk uint16
	onFeedback func(packet rtcp.RawPacket)
}
// NewTransportWideCCResponder creates a Responder for the given media SSRC,
// using a random SSRC as the feedback sender.
func NewTransportWideCCResponder(ssrc uint32) *Responder {
	responder := &Responder{
		extInfo: make([]rtpExtInfo, 0, 101),
		sSSRC:   rand.Uint32(),
		mSSRC:   ssrc,
	}
	return responder
}
// Push a sequence number read from rtp packet ext packet.
// It records the arrival time and, once enough packets/time have
// accumulated, builds a TWCC feedback packet and hands it to the
// OnFeedback callback.
func (t *Responder) Push(sn uint16, timeNS int64, marker bool) {
	t.Lock()
	defer t.Unlock()
	// detect 16-bit sequence number wrap-around and bump the cycle counter
	if sn < 0x0fff && (t.lastSn&0xffff) > 0xf000 {
		t.cycles += 1 << 16
	}
	t.extInfo = append(t.extInfo, rtpExtInfo{
		ExtTSN:    t.cycles | uint32(sn),
		Timestamp: timeNS / 1e3, // store microseconds
	})
	if t.lastReport == 0 {
		t.lastReport = timeNS
	}
	t.lastSn = sn
	delta := timeNS - t.lastReport
	if len(t.extInfo) > 20 && t.mSSRC != 0 &&
		(delta >= tccReportDelta || len(t.extInfo) > 100 || (marker && delta >= tccReportDeltaAfterMark)) {
		// guard the callback: previously a Push before OnFeedback was set
		// would panic on a nil function value
		if pkt := t.buildTransportCCPacket(); pkt != nil && t.onFeedback != nil {
			t.onFeedback(pkt)
		}
		t.lastReport = timeNS
	}
}
// OnFeedback sets the callback for the formed twcc feedback rtcp packet.
// It should be set before packets are pushed; the callback is invoked with
// the Responder's lock held.
func (t *Responder) OnFeedback(f func(p rtcp.RawPacket)) {
	t.onFeedback = f
}
// buildTransportCCPacket assembles a TWCC feedback packet from the packets
// recorded since the last report. It sorts the records, fills gaps with
// "not received" entries, encodes arrival deltas, and packs per-packet
// status into run-length or status-vector chunks. Returns nil when there
// is nothing to report. Caller must hold the lock.
func (t *Responder) buildTransportCCPacket() rtcp.RawPacket {
	if len(t.extInfo) == 0 {
		return nil
	}
	sort.Slice(t.extInfo, func(i, j int) bool {
		return t.extInfo[i].ExtTSN < t.extInfo[j].ExtTSN
	})
	// expand to a dense sequence: missing sequence numbers become
	// zero-timestamp ("not received") entries
	tccPkts := make([]rtpExtInfo, 0, int(float64(len(t.extInfo))*1.2))
	for _, tccExtInfo := range t.extInfo {
		if tccExtInfo.ExtTSN < t.lastExtSN {
			continue
		}
		if t.lastExtSN != 0 {
			for j := t.lastExtSN + 1; j < tccExtInfo.ExtTSN; j++ {
				tccPkts = append(tccPkts, rtpExtInfo{ExtTSN: j})
			}
		}
		t.lastExtSN = tccExtInfo.ExtTSN
		tccPkts = append(tccPkts, tccExtInfo)
	}
	t.extInfo = t.extInfo[:0]
	firstRecv := false
	same := true
	timestamp := int64(0)
	lastStatus := rtcp.TypeTCCPacketReceivedWithoutDelta
	maxStatus := rtcp.TypeTCCPacketNotReceived
	// pending statuses not yet committed to a chunk
	var statusList deque.Deque
	statusList.SetMinCapacity(3)
	for _, stat := range tccPkts {
		status := rtcp.TypeTCCPacketNotReceived
		if stat.Timestamp != 0 {
			var delta int64
			if !firstRecv {
				// first received packet fixes the report's reference time
				// (64ms granularity) and writes the header
				firstRecv = true
				refTime := stat.Timestamp / 64e3
				timestamp = refTime * 64e3
				t.writeHeader(
					uint16(tccPkts[0].ExtTSN),
					uint16(len(tccPkts)),
					uint32(refTime),
				)
				t.pktCtn++
			}
			// deltas are encoded in 250us units; out-of-range values use
			// the two-byte "large delta" form, clamped to int16
			delta = (stat.Timestamp - timestamp) / 250
			if delta < 0 || delta > 255 {
				status = rtcp.TypeTCCPacketReceivedLargeDelta
				rDelta := int16(delta)
				if int64(rDelta) != delta {
					if rDelta > 0 {
						rDelta = math.MaxInt16
					} else {
						rDelta = math.MinInt16
					}
				}
				t.writeDelta(status, uint16(rDelta))
			} else {
				status = rtcp.TypeTCCPacketReceivedSmallDelta
				t.writeDelta(status, uint16(delta))
			}
			timestamp = stat.Timestamp
		}
		// a long run of identical statuses is flushed as a run-length chunk
		if same && status != lastStatus && lastStatus != rtcp.TypeTCCPacketReceivedWithoutDelta {
			if statusList.Len() > 7 {
				t.writeRunLengthChunk(lastStatus, uint16(statusList.Len()))
				statusList.Clear()
				lastStatus = rtcp.TypeTCCPacketReceivedWithoutDelta
				maxStatus = rtcp.TypeTCCPacketNotReceived
				same = true
			} else {
				same = false
			}
		}
		statusList.PushBack(status)
		if status > maxStatus {
			maxStatus = status
		}
		lastStatus = status
		// mixed statuses are flushed as status-vector chunks: 7 two-bit
		// symbols when a large delta is present, otherwise 14 one-bit symbols
		if !same && maxStatus == rtcp.TypeTCCPacketReceivedLargeDelta && statusList.Len() > 6 {
			for i := 0; i < 7; i++ {
				t.createStatusSymbolChunk(rtcp.TypeTCCSymbolSizeTwoBit, statusList.PopFront().(uint16), i)
			}
			t.writeStatusSymbolChunk(rtcp.TypeTCCSymbolSizeTwoBit)
			lastStatus = rtcp.TypeTCCPacketReceivedWithoutDelta
			maxStatus = rtcp.TypeTCCPacketNotReceived
			same = true
			// re-derive same/maxStatus from the statuses still queued
			for i := 0; i < statusList.Len(); i++ {
				status = statusList.At(i).(uint16)
				if status > maxStatus {
					maxStatus = status
				}
				if same && lastStatus != rtcp.TypeTCCPacketReceivedWithoutDelta && status != lastStatus {
					same = false
				}
				lastStatus = status
			}
		} else if !same && statusList.Len() > 13 {
			for i := 0; i < 14; i++ {
				t.createStatusSymbolChunk(rtcp.TypeTCCSymbolSizeOneBit, statusList.PopFront().(uint16), i)
			}
			t.writeStatusSymbolChunk(rtcp.TypeTCCSymbolSizeOneBit)
			lastStatus = rtcp.TypeTCCPacketReceivedWithoutDelta
			maxStatus = rtcp.TypeTCCPacketNotReceived
			same = true
		}
	}
	// flush whatever statuses remain after the loop
	if statusList.Len() > 0 {
		if same {
			t.writeRunLengthChunk(lastStatus, uint16(statusList.Len()))
		} else if maxStatus == rtcp.TypeTCCPacketReceivedLargeDelta {
			for i := 0; i < statusList.Len(); i++ {
				t.createStatusSymbolChunk(rtcp.TypeTCCSymbolSizeTwoBit, statusList.PopFront().(uint16), i)
			}
			t.writeStatusSymbolChunk(rtcp.TypeTCCSymbolSizeTwoBit)
		} else {
			for i := 0; i < statusList.Len(); i++ {
				t.createStatusSymbolChunk(rtcp.TypeTCCSymbolSizeOneBit, statusList.PopFront().(uint16), i)
			}
			t.writeStatusSymbolChunk(rtcp.TypeTCCSymbolSizeOneBit)
		}
	}
	// compose header + payload + deltas, padding to a 4-byte boundary
	pLen := t.len + t.deltaLen + 4
	pad := pLen%4 != 0
	var padSize uint8
	for pLen%4 != 0 {
		padSize++
		pLen++
	}
	hdr := rtcp.Header{
		Padding: pad,
		Length: (pLen / 4) - 1,
		Count: rtcp.FormatTCC,
		Type: rtcp.TypeTransportSpecificFeedback,
	}
	hb, _ := hdr.Marshal()
	pkt := make(rtcp.RawPacket, pLen)
	copy(pkt, hb)
	copy(pkt[4:], t.payload[:t.len])
	copy(pkt[4+t.len:], t.deltas[:t.deltaLen])
	if pad {
		pkt[len(pkt)-1] = padSize
	}
	t.deltaLen = 0
	return pkt
}
// writeHeader writes the fixed 16-byte TWCC feedback header (SSRCs, base
// sequence number, packet status count, reference time and feedback packet
// count) into the payload buffer and resets the payload length to 16.
func (t *Responder) writeHeader(bSN, packetCount uint16, refTime uint32) {
	/*
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   | SSRC of packet sender |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   | SSRC of media source |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   | base sequence number | packet status count |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   | reference time | fb pkt. count |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/
	binary.BigEndian.PutUint32(t.payload[0:], t.sSSRC)
	binary.BigEndian.PutUint32(t.payload[4:], t.mSSRC)
	binary.BigEndian.PutUint16(t.payload[baseSequenceNumberOffset:], bSN)
	binary.BigEndian.PutUint16(t.payload[packetStatusCountOffset:], packetCount)
	binary.BigEndian.PutUint32(t.payload[referenceTimeOffset:], refTime<<8|uint32(t.pktCtn))
	t.len = 16
}
// writeRunLengthChunk appends a run-length chunk (chunk type 0): a 2-bit
// status symbol followed by a 13-bit run length.
func (t *Responder) writeRunLengthChunk(symbol uint16, runLength uint16) {
	/*
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |T| S | Run Length |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/
	binary.BigEndian.PutUint16(t.payload[t.len:], symbol<<13|runLength)
	t.len += 2
}
// createStatusSymbolChunk packs the i-th status symbol into the pending
// status vector chunk, using one- or two-bit symbols depending on
// symbolSize. The chunk is emitted later by writeStatusSymbolChunk.
func (t *Responder) createStatusSymbolChunk(symbolSize, symbol uint16, i int) {
	/*
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	   |T|S| symbol list |
	   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
	*/
	numOfBits := symbolSize + 1
	// +2 skips the chunk-type and symbol-size header bits
	t.chunk = setNBitsOfUint16(t.chunk, numOfBits, numOfBits*uint16(i)+2, symbol)
}
// writeStatusSymbolChunk finalizes the pending status vector chunk: sets
// the chunk-type bit (1 = status vector) and the symbol-size bit, appends
// the two-byte chunk to the payload, and resets the accumulator.
func (t *Responder) writeStatusSymbolChunk(symbolSize uint16) {
	t.chunk = setNBitsOfUint16(t.chunk, 1, 0, 1)
	t.chunk = setNBitsOfUint16(t.chunk, 1, 1, symbolSize)
	binary.BigEndian.PutUint16(t.payload[t.len:], t.chunk)
	t.chunk = 0
	t.len += 2
}
// writeDelta appends a receive-time delta to the delta buffer: a single
// byte for small deltas, two big-endian bytes for everything else.
func (t *Responder) writeDelta(deltaType, delta uint16) {
	switch deltaType {
	case rtcp.TypeTCCPacketReceivedSmallDelta:
		t.deltas[t.deltaLen] = byte(delta)
		t.deltaLen++
	default:
		binary.BigEndian.PutUint16(t.deltas[t.deltaLen:], delta)
		t.deltaLen += 2
	}
}
// setNBitsOfUint16 copies the low `size` bits of val into src starting at
// bit position startIndex (counted from the most-significant bit) and
// returns the result. Returns 0 when the bits would not fit in 16 bits.
func setNBitsOfUint16(src, size, startIndex, val uint16) uint16 {
	if startIndex+size > 16 {
		return 0
	}
	mask := uint16(1)<<size - 1
	shift := 16 - startIndex - size
	return src | ((val & mask) << shift)
}

346
pkg/sfu/twcc/twcc_test.go Normal file
View File

@@ -0,0 +1,346 @@
package twcc
import (
"math/rand"
"testing"
"time"
"github.com/pion/rtcp"
"github.com/stretchr/testify/assert"
)
// TestTransportWideCC_writeRunLengthChunk verifies the 2-byte run-length
// chunk encoding, including writes at a non-zero payload offset.
func TestTransportWideCC_writeRunLengthChunk(t1 *testing.T) {
	type fields struct {
		len uint16
	}
	type args struct {
		symbol uint16
		runLength uint16
	}
	tests := []struct {
		name string
		fields fields
		args args
		wantErr bool
		wantBytes []byte
	}{
		{
			name: "Must not return error",
			args: args{
				symbol: rtcp.TypeTCCPacketNotReceived,
				runLength: 221,
			},
			wantErr: false,
			wantBytes: []byte{0, 0xdd},
		}, {
			name: "Must set run length after padding",
			fields: fields{
				len: 1,
			},
			args: args{
				symbol: rtcp.TypeTCCPacketReceivedWithoutDelta,
				runLength: 24,
			},
			wantBytes: []byte{0, 0x60, 0x18},
		},
	}
	for _, tt := range tests {
		tt := tt
		t1.Run(tt.name, func(t1 *testing.T) {
			t := &Responder{
				len: tt.fields.len,
			}
			t.writeRunLengthChunk(tt.args.symbol, tt.args.runLength)
			assert.Equal(t1, tt.wantBytes, t.payload[:t.len])
		})
	}
}
// TestTransportWideCC_writeStatusSymbolChunk verifies status-vector chunk
// packing for both one-bit (14 symbols) and two-bit (7 symbols) encodings.
func TestTransportWideCC_writeStatusSymbolChunk(t1 *testing.T) {
	type fields struct {
		len uint16
	}
	type args struct {
		symbolSize uint16
		symbolList []uint16
	}
	tests := []struct {
		name string
		fields fields
		args args
		wantBytes []byte
	}{
		{
			name: "Must not return error",
			args: args{
				symbolSize: rtcp.TypeTCCSymbolSizeOneBit,
				symbolList: []uint16{rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketNotReceived},
			},
			wantBytes: []byte{0x9F, 0x1C},
		},
		{
			name: "Must set symbol chunk after padding",
			fields: fields{
				len: 1,
			},
			args: args{
				symbolSize: rtcp.TypeTCCSymbolSizeTwoBit,
				symbolList: []uint16{
					rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketReceivedWithoutDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketReceivedSmallDelta,
					rtcp.TypeTCCPacketNotReceived,
					rtcp.TypeTCCPacketNotReceived},
			},
			wantBytes: []byte{0x0, 0xcd, 0x50},
		},
	}
	for _, tt := range tests {
		tt := tt
		t1.Run(tt.name, func(t1 *testing.T) {
			t := &Responder{
				len: tt.fields.len,
			}
			for i, v := range tt.args.symbolList {
				t.createStatusSymbolChunk(tt.args.symbolSize, v, i)
			}
			t.writeStatusSymbolChunk(tt.args.symbolSize)
			assert.Equal(t1, tt.wantBytes, t.payload[:t.len])
		})
	}
}
// TestTransportWideCC_writeDelta verifies one-byte small-delta and
// two-byte big-endian large-delta encodings, with and without a prior
// offset in the delta buffer.
func TestTransportWideCC_writeDelta(t1 *testing.T) {
	a := -32768
	type fields struct {
		deltaLen uint16
	}
	type args struct {
		deltaType uint16
		delta uint16
	}
	tests := []struct {
		name string
		fields fields
		args args
		want []byte
	}{
		{
			name: "Must set correct small delta",
			args: args{
				deltaType: rtcp.TypeTCCPacketReceivedSmallDelta,
				delta: 255,
			},
			want: []byte{0xff},
		},
		{
			name: "Must set correct small delta with padding",
			fields: fields{
				deltaLen: 1,
			},
			args: args{
				deltaType: rtcp.TypeTCCPacketReceivedSmallDelta,
				delta: 255,
			},
			want: []byte{0, 0xff},
		},
		{
			name: "Must set correct large delta",
			args: args{
				deltaType: rtcp.TypeTCCPacketReceivedLargeDelta,
				delta: 32767,
			},
			want: []byte{0x7F, 0xFF},
		},
		{
			name: "Must set correct large delta with padding",
			fields: fields{
				deltaLen: 1,
			},
			args: args{
				deltaType: rtcp.TypeTCCPacketReceivedLargeDelta,
				delta: uint16(a),
			},
			want: []byte{0, 0x80, 0x00},
		},
	}
	for _, tt := range tests {
		tt := tt
		t1.Run(tt.name, func(t1 *testing.T) {
			t := &Responder{
				deltaLen: tt.fields.deltaLen,
			}
			t.writeDelta(tt.args.deltaType, tt.args.delta)
			assert.Equal(t1, tt.want, t.deltas[:t.deltaLen])
			// relies on the status constants doubling as byte widths:
			// SmallDelta == 1 byte written, LargeDelta == 2 bytes written
			assert.Equal(t1, tt.fields.deltaLen+tt.args.deltaType, t.deltaLen)
		})
	}
}
// TestTransportWideCC_writeHeader verifies the 16-byte TWCC feedback
// header layout (SSRCs, base SN, status count, reference time | fb count).
func TestTransportWideCC_writeHeader(t1 *testing.T) {
	type fields struct {
		tccPktCtn uint8
		sSSRC uint32
		mSSRC uint32
	}
	type args struct {
		bSN uint16
		packetCount uint16
		refTime uint32
	}
	tests := []struct {
		name string
		fields fields
		args args
		want []byte
	}{
		{
			name: "Must construct correct header",
			fields: fields{
				tccPktCtn: 23,
				sSSRC: 4195875351,
				mSSRC: 1124282272,
			},
			args: args{
				bSN: 153,
				packetCount: 1,
				refTime: 4057090,
			},
			want: []byte{
				0xfa, 0x17, 0xfa, 0x17,
				0x43, 0x3, 0x2f, 0xa0,
				0x0, 0x99, 0x0, 0x1,
				0x3d, 0xe8, 0x2, 0x17},
		},
	}
	for _, tt := range tests {
		tt := tt
		t1.Run(tt.name, func(t1 *testing.T) {
			t := &Responder{
				pktCtn: tt.fields.tccPktCtn,
				sSSRC: tt.fields.sSSRC,
				mSSRC: tt.fields.mSSRC,
			}
			t.writeHeader(tt.args.bSN, tt.args.packetCount, tt.args.refTime)
			assert.Equal(t1, tt.want, t.payload[0:16])
		})
	}
}
// TestTccPacket assembles a full feedback packet by hand — header, a
// run-length chunk, one-bit and two-bit status vector chunks, and deltas —
// then checks the padded compound packet round-trips through
// rtcp.TransportLayerCC.Unmarshal.
func TestTccPacket(t1 *testing.T) {
	want := []byte{
		0xfa, 0x17, 0xfa, 0x17,
		0x43, 0x3, 0x2f, 0xa0,
		0x0, 0x99, 0x0, 0x1,
		0x3d, 0xe8, 0x2, 0x17,
		0x60, 0x18, 0x0, 0xdd,
		0x9F, 0x1C, 0xcd, 0x50,
	}
	delta := []byte{
		0xff, 0x80, 0xaa,
	}
	symbol1 := []uint16{rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketNotReceived}
	symbol2 := []uint16{
		rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketReceivedWithoutDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketReceivedSmallDelta,
		rtcp.TypeTCCPacketNotReceived,
		rtcp.TypeTCCPacketNotReceived}
	t := &Responder{
		pktCtn: 23,
		sSSRC: 4195875351,
		mSSRC: 1124282272,
	}
	t.writeHeader(153, 1, 4057090)
	t.writeRunLengthChunk(rtcp.TypeTCCPacketReceivedWithoutDelta, 24)
	t.writeRunLengthChunk(rtcp.TypeTCCPacketNotReceived, 221)
	for i, v := range symbol1 {
		t.createStatusSymbolChunk(rtcp.TypeTCCSymbolSizeOneBit, v, i)
	}
	t.writeStatusSymbolChunk(rtcp.TypeTCCSymbolSizeOneBit)
	for i, v := range symbol2 {
		t.createStatusSymbolChunk(rtcp.TypeTCCSymbolSizeTwoBit, v, i)
	}
	t.writeStatusSymbolChunk(rtcp.TypeTCCSymbolSizeTwoBit)
	t.deltaLen = uint16(len(delta))
	assert.Equal(t1, want, t.payload[:24])
	// mirror buildTransportCCPacket's padding/length computation
	pLen := t.len + t.deltaLen + 4
	pad := pLen%4 != 0
	for pLen%4 != 0 {
		pLen++
	}
	hdr := rtcp.Header{
		Padding: pad,
		Length: (pLen / 4) - 1,
		Count: rtcp.FormatTCC,
		Type: rtcp.TypeTransportSpecificFeedback,
	}
	assert.Equal(t1, int(pLen), len(want)+3+4+1)
	hb, _ := hdr.Marshal()
	pkt := make([]byte, pLen)
	copy(pkt, hb)
	assert.Equal(t1, hb, pkt[:len(hb)])
	copy(pkt[4:], t.payload[:t.len])
	assert.Equal(t1, append(hb, t.payload[:t.len]...), pkt[:len(hb)+int(t.len)])
	copy(pkt[4+t.len:], delta[:t.deltaLen])
	assert.Equal(t1, delta, pkt[len(hb)+int(t.len):len(pkt)-1])
	var ss rtcp.TransportLayerCC
	err := ss.Unmarshal(pkt)
	assert.NoError(t1, err)
	assert.Equal(t1, hdr, ss.Header)
}
// BenchmarkBuildPacket measures buildTransportCCPacket over ~100 packets
// with a randomized (per-run) inter-packet spacing of 60-240ms.
func BenchmarkBuildPacket(b *testing.B) {
	rand.Seed(time.Now().UnixNano())
	b.ReportAllocs()
	// random spacing multiplier in [1, 4]
	n := 1 + rand.Intn(4-1+1)
	var twcc Responder
	tm := time.Now()
	for i := 1; i < 100; i++ {
		tm := tm.Add(time.Duration(60*n) * time.Millisecond)
		twcc.extInfo = append(twcc.extInfo, rtpExtInfo{
			ExtTSN: uint32(i),
			Timestamp: tm.UnixNano(),
		})
	}
	for i := 0; i < b.N; i++ {
		_ = twcc.buildTransportCCPacket()
	}
}
}