Integrate CueAlignmentHandler

Also changed ChunkingHandler to be a one-in one-out handler.

Issue: #355

Change-Id: Ie98a96bcc0ddded347699c9f333f604826976d11
KongQun Yang 2018-03-20 10:12:18 -07:00
parent e685c8a63a
commit e1bb27f130
95 changed files with 397 additions and 714 deletions
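
With ChunkingHandler now one-in one-out, cue alignment happens upstream of chunking. A rough wiring sketch of the resulting pipeline shape; the SetHandler() connection API and the CueAlignmentHandler constructor argument are assumptions for illustration, not part of this diff:

// Hypothetical wiring: one shared cue aligner feeding per-stream chunkers.
auto cue_aligner = std::make_shared<CueAlignmentHandler>(sync_points);
for (size_t i = 0; i < num_streams; ++i) {
  // Each aligned output stream gets its own one-in one-out chunker.
  auto chunker = std::make_shared<ChunkingHandler>(chunking_params);
  cue_aligner->SetHandler(i, chunker);
}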

View File

@@ -806,8 +806,8 @@ class PackagerFunctionalTest(PackagerAppTest):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video']),
self._GetFlags(encryption=True, ad_cues='1.5'))
self._DiffGold(self.output[0], 'bear-640x360-a-cenc-golden.mp4')
self._DiffGold(self.output[1], 'bear-640x360-v-cenc-golden.mp4')
self._DiffGold(self.output[0], 'bear-640x360-a-cenc-ad_cues-golden.mp4')
self._DiffGold(self.output[1], 'bear-640x360-v-cenc-ad_cues-golden.mp4')
self._DiffGold(self.mpd_output, 'bear-640x360-av-cenc-ad_cues-golden.mpd')
self._VerifyDecryption(self.output[0], 'bear-640x360-a-demuxed-golden.mp4')
self._VerifyDecryption(self.output[1], 'bear-640x360-v-golden.mp4')
@@ -1088,10 +1088,10 @@ class PackagerFunctionalTest(PackagerAppTest):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], hls=True),
self._GetFlags(encryption=True, output_hls=True, ad_cues='1.5'))
self._DiffGold(self.output[0], 'bear-640x360-a-cenc-golden.mp4')
self._DiffGold(self.output[1], 'bear-640x360-v-cenc-golden.mp4')
self._DiffGold(self.output[0], 'bear-640x360-a-cenc-ad_cues-golden.mp4')
self._DiffGold(self.output[1], 'bear-640x360-v-cenc-ad_cues-golden.mp4')
self._DiffGold(self.hls_master_playlist_output,
'bear-640x360-av-mp4-master-cenc-golden.m3u8')
'bear-640x360-av-mp4-master-cenc-ad_cues-golden.m3u8')
self._DiffGold(
os.path.join(self.tmp_dir, 'audio.m3u8'),
'bear-640x360-a-mp4-cenc-ad_cues-golden.m3u8')
@@ -1146,8 +1146,8 @@ class PackagerFunctionalTest(PackagerAppTest):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'], live=True),
self._GetFlags(generate_static_mpd=True, ad_cues='1.5'))
self._DiffLiveGold(self.output[0], 'bear-640x360-a-live-golden')
self._DiffLiveGold(self.output[1], 'bear-640x360-v-live-golden')
self._DiffLiveGold(self.output[0], 'bear-640x360-a-live-ad_cues-golden')
self._DiffLiveGold(self.output[1], 'bear-640x360-v-live-ad_cues-golden')
self._DiffGold(self.mpd_output,
'bear-640x360-av-live-static-ad_cues-golden.mpd')

View File

@@ -7,7 +7,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="0" bandwidth="79930" codecs="opus" mimeType="audio/mp4" audioSamplingRate="48000">
<Representation id="0" bandwidth="81568" codecs="opus" mimeType="audio/mp4" audioSamplingRate="48000">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="975-1042" timescale="1000000">

View File

@@ -3,7 +3,7 @@
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.736S">
<Period id="0">
<AdaptationSet id="0" contentType="audio" subsegmentAlignment="true">
<Representation id="0" bandwidth="75444" codecs="opus" mimeType="audio/webm" audioSamplingRate="48000">
<Representation id="0" bandwidth="76531" codecs="opus" mimeType="audio/webm" audioSamplingRate="48000">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.webm</BaseURL>
<SegmentBase indexRange="323-371" timescale="1000000">

Binary file not shown.

View File

@@ -1,4 +1,4 @@
bandwidth: 129185
bandwidth: 129162
audio_info {
codec: "mp4a.40.2"
sampling_frequency: 44100

View File

@@ -3,12 +3,12 @@
## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXTINF:0.952,
#EXTINF:0.975,
output_audio-1.ts
#EXT-X-DISCONTINUITY
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",IV=0x3334353637383930,KEYFORMAT="identity"
#EXTINF:0.998,
output_audio-2.ts
#EXTINF:0.813,
#EXTINF:0.789,
output_audio-3.ts
#EXT-X-ENDLIST

View File

@@ -3,9 +3,9 @@
## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:EVENT
#EXTINF:0.952,
#EXTINF:0.975,
output_audio-1.ts
#EXTINF:0.998,
output_audio-2.ts
#EXTINF:0.813,
#EXTINF:0.789,
output_audio-3.ts

View File

@@ -3,12 +3,12 @@
## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXTINF:0.952,
#EXTINF:0.975,
output_audio-1.ts
#EXT-X-DISCONTINUITY
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="skd://www.license.com/getkey?KeyId=31323334-3536-3738-3930-313233343536",KEYFORMATVERSIONS="1",KEYFORMAT="com.apple.streamingkeydelivery"
#EXTINF:0.998,
output_audio-2.ts
#EXTINF:0.813,
#EXTINF:0.789,
output_audio-3.ts
#EXT-X-ENDLIST

View File

@@ -3,10 +3,10 @@
## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXTINF:0.952,
#EXTINF:0.975,
output_audio-1.ts
#EXTINF:0.998,
output_audio-2.ts
#EXTINF:0.813,
#EXTINF:0.789,
output_audio-3.ts
#EXT-X-ENDLIST

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -4,9 +4,9 @@
#EXT-X-TARGETDURATION:2
#EXT-X-MEDIA-SEQUENCE:1
#EXT-X-DISCONTINUITY-SEQUENCE:1
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",IV=0x3334353637383930,KEYFORMAT="identity"
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MjM0NTY3ODkwMTIzNDU2MQ==",IV=0x3334353637383930,KEYFORMAT="identity"
#EXTINF:0.998,
output_audio-2.ts
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MjM0NTY3ODkwMTIzNDU2MQ==",IV=0x3334353637383930,KEYFORMAT="identity"
#EXTINF:0.813,
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MzQ1Njc4OTAxMjM0NTYxMg==",IV=0x3334353637383930,KEYFORMAT="identity"
#EXTINF:0.789,
output_audio-3.ts

View File

@@ -5,5 +5,5 @@
#EXT-X-MEDIA-SEQUENCE:1
#EXTINF:0.998,
output_audio-2.ts
#EXTINF:0.813,
#EXTINF:0.789,
output_audio-3.ts

View File

@@ -5,14 +5,17 @@
#EXT-X-PLAYLIST-TYPE:VOD
#EXT-X-MAP:URI="output_audio.mp4",BYTERANGE="967@0"
#EXT-X-KEY:METHOD=SAMPLE-AES-CTR,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",KEYFORMAT="identity"
#EXTINF:0.998,
#EXT-X-BYTERANGE:16279@1035
#EXTINF:1.022,
#EXT-X-BYTERANGE:16655@1047
output_audio.mp4
#EXTINF:0.998,
#EXT-X-BYTERANGE:16674
#EXT-X-BYTERANGE:16650
output_audio.mp4
#EXTINF:0.046,
#EXT-X-BYTERANGE:1014
output_audio.mp4
#EXT-X-PLACEMENT-OPPORTUNITY
#EXTINF:0.766,
#EXT-X-BYTERANGE:10632
#EXTINF:0.697,
#EXT-X-BYTERANGE:9415
output_audio.mp4
#EXT-X-ENDLIST
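
Reading the updated durations off this golden: 1.022 + 0.998 + 0.046 ≈ 2.066 s of audio now precede #EXT-X-PLACEMENT-OPPORTUNITY, so the 1.5 s ad cue configured in the tests has been moved up to the next chunkable boundary, with the new 0.046 s micro-segment ending exactly at the cue-aligned split.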

View File

@@ -5,13 +5,13 @@
#EXT-X-PLAYLIST-TYPE:VOD
#EXT-X-MAP:URI="output_audio.mp4",BYTERANGE="967@0"
#EXT-X-KEY:METHOD=SAMPLE-AES-CTR,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",KEYFORMAT="identity"
#EXTINF:0.998,
#EXT-X-BYTERANGE:16279@1035
#EXTINF:1.022,
#EXT-X-BYTERANGE:16655@1035
output_audio.mp4
#EXTINF:0.998,
#EXT-X-BYTERANGE:16674
#EXT-X-BYTERANGE:16650
output_audio.mp4
#EXTINF:0.766,
#EXT-X-BYTERANGE:10632
#EXTINF:0.743,
#EXT-X-BYTERANGE:10272
output_audio.mp4
#EXT-X-ENDLIST

View File

@@ -4,10 +4,10 @@
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXT-X-MAP:URI="audio-init.mp4"
#EXTINF:0.998,
#EXTINF:1.022,
audio-1.m4s
#EXTINF:0.998,
audio-2.m4s
#EXTINF:0.766,
#EXTINF:0.743,
audio-3.m4s
#EXT-X-ENDLIST

View File

@@ -7,8 +7,8 @@
output_audio-1.ts
#EXT-X-DISCONTINUITY
#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",IV=0x3334353637383930,KEYFORMAT="identity"
#EXTINF:0.975,
#EXTINF:1.010,
output_audio-2.ts
#EXTINF:0.836,
#EXTINF:0.801,
output_audio-3.ts
#EXT-X-ENDLIST

View File

@@ -5,8 +5,8 @@
#EXT-X-PLAYLIST-TYPE:VOD
#EXTINF:0.975,
output_audio-1.ts
#EXTINF:0.975,
#EXTINF:1.010,
output_audio-2.ts
#EXTINF:0.836,
#EXTINF:0.801,
output_audio-3.ts
#EXT-X-ENDLIST

View File

@@ -7,10 +7,10 @@
#EXTINF:0.975,
#EXT-X-BYTERANGE:23728@794
output_audio.mp4
#EXTINF:0.975,
#EXT-X-BYTERANGE:23730
#EXTINF:1.010,
#EXT-X-BYTERANGE:24574
output_audio.mp4
#EXTINF:0.836,
#EXT-X-BYTERANGE:20354
#EXTINF:0.801,
#EXT-X-BYTERANGE:19510
output_audio.mp4
#EXT-X-ENDLIST

View File

@@ -3,5 +3,5 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-STREAM-INF:BANDWIDTH=1242703,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
#EXT-X-STREAM-INF:BANDWIDTH=1242861,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
video.m3u8

View File

@@ -3,5 +3,5 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-STREAM-INF:BANDWIDTH=1168277,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
#EXT-X-STREAM-INF:BANDWIDTH=1168319,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
video.m3u8

View File

@@ -19,7 +19,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100">

View File

@@ -2,57 +2,57 @@
<!--Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.73607S">
<Period id="0" duration="PT2.002S">
<AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
<AdaptationSet id="0" contentType="audio" subsegmentAlignment="true">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="0" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<Representation id="0" bandwidth="129651" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1046" timescale="44100">
<Initialization range="0-966"/>
</SegmentBase>
</Representation>
</AdaptationSet>
<AdaptationSet id="2" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<BaseURL>output_video.mp4</BaseURL>
<SegmentBase indexRange="1091-1158" timescale="30000">
<Initialization range="0-1090"/>
</SegmentBase>
</Representation>
</AdaptationSet>
<AdaptationSet id="2" contentType="audio" subsegmentAlignment="true">
</Period>
<Period id="1" duration="PT0.734067S">
<AdaptationSet id="0" contentType="audio" subsegmentAlignment="true">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="0" bandwidth="129651" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100">
<SegmentBase indexRange="967-1046" timescale="44100" presentationTimeOffset="91230">
<Initialization range="0-966"/>
</SegmentBase>
</Representation>
</AdaptationSet>
</Period>
<Period id="1" duration="PT0.734067S">
<AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
<AdaptationSet id="2" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="0" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<Representation id="1" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<BaseURL>output_video.mp4</BaseURL>
<SegmentBase indexRange="1091-1158" timescale="30000" presentationTimeOffset="62061">
<Initialization range="0-1090"/>
</SegmentBase>
</Representation>
</AdaptationSet>
<AdaptationSet id="2" contentType="audio" subsegmentAlignment="true">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100" presentationTimeOffset="91230">
<Initialization range="0-966"/>
</SegmentBase>
</Representation>
</AdaptationSet>
</Period>
</MPD>
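
Sanity check on the new period split: converted to seconds, the presentationTimeOffset values agree across streams, 91230 / 44100 ≈ 2.069 s for audio and 62061 / 30000 ≈ 2.069 s for video, so both representations start Period 1 at the same cue-aligned instant.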

View File

@@ -19,7 +19,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100">

View File

@@ -19,7 +19,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="129035" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="129012" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="915-982" timescale="44100">

View File

@@ -15,7 +15,7 @@
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" subsegmentAlignment="true">
<Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">

View File

@@ -19,7 +19,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100">

View File

@@ -21,12 +21,13 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="1" bandwidth="124859" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="124634" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -17,7 +17,7 @@
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="124859" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="124634" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
@@ -25,8 +25,9 @@
</ContentProtection>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -17,12 +17,13 @@
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b"/>
<Representation id="1" bandwidth="125808" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="125598" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -17,12 +17,13 @@
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b"/>
<Representation id="1" bandwidth="125337" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="125122" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -15,14 +15,15 @@
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="125808" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="125598" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011"/>
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -13,12 +13,13 @@
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="122544" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="122308" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -12,27 +12,19 @@
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="131035" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="143117" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="2048"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>
</AdaptationSet>
</Period>
<Period id="1" duration="PT0.734067S">
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="108486" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" presentationTimeOffset="91230" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="3">
<SegmentTimeline>
<S t="88064" d="33792"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>
</AdaptationSet>
<AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" segmentAlignment="true" par="16:9">
<Representation id="0" bandwidth="869044" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<SegmentTemplate timescale="30000" presentationTimeOffset="62061" initialization="output_video-init.mp4" media="output_video-$Number$.m4s" startNumber="3">
@@ -42,5 +34,15 @@
</SegmentTemplate>
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="105634" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" presentationTimeOffset="91230" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="4">
<SegmentTimeline>
<S t="91136" d="30720"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>
</AdaptationSet>
</Period>
</MPD>

View File

@@ -13,12 +13,13 @@
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="audio" segmentAlignment="true">
<Representation id="1" bandwidth="122544" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="1" bandwidth="122308" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="44032" r="1"/>
<S t="88064" d="33792"/>
<S t="0" d="45056"/>
<S t="45056" d="44032"/>
<S t="89088" d="32768"/>
</SegmentTimeline>
</SegmentTemplate>
</Representation>

View File

@@ -0,0 +1,7 @@
#EXTM3U
## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-STREAM-INF:BANDWIDTH=1152419,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
video.m3u8

View File

@@ -3,5 +3,5 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-STREAM-INF:BANDWIDTH=1111340,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
#EXT-X-STREAM-INF:BANDWIDTH=1111147,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
video.m3u8

View File

@@ -3,5 +3,5 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio/audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-STREAM-INF:BANDWIDTH=1105163,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
#EXT-X-STREAM-INF:BANDWIDTH=1105129,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
video/video.m3u8

View File

@@ -32,7 +32,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="2" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="2" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100">

View File

@@ -38,7 +38,7 @@
<ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
<cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
</ContentProtection>
<Representation id="3" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<Representation id="3" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>output_audio.mp4</BaseURL>
<SegmentBase indexRange="967-1034" timescale="44100">

View File

@@ -8,10 +8,10 @@
#EXTINF:1.010,
#EXT-X-BYTERANGE:24460@977
output_audio.mp4
#EXTINF:0.975,
#EXT-X-BYTERANGE:23899
#EXTINF:1.010,
#EXT-X-BYTERANGE:24747
output_audio.mp4
#EXTINF:0.766,
#EXT-X-BYTERANGE:18811
#EXTINF:0.731,
#EXT-X-BYTERANGE:17963
output_audio.mp4
#EXT-X-ENDLIST

View File

@@ -3,5 +3,5 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-STREAM-INF:BANDWIDTH=1174135,CODECS="avc1.64001e,ec-3",RESOLUTION=640x360,AUDIO="default-audio-group"
#EXT-X-STREAM-INF:BANDWIDTH=1174212,CODECS="avc1.64001e,ec-3",RESOLUTION=640x360,AUDIO="default-audio-group"
video.m3u8

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View File

@@ -49,7 +49,7 @@ bool MediaHandler::ValidateOutputStreamIndex(size_t stream_index) const {
return stream_index < num_input_streams_;
}
Status MediaHandler::Dispatch(std::unique_ptr<StreamData> stream_data) {
Status MediaHandler::Dispatch(std::unique_ptr<StreamData> stream_data) const {
size_t output_stream_index = stream_data->stream_index;
auto handler_it = output_handlers_.find(output_stream_index);
if (handler_it == output_handlers_.end()) {

View File

@@ -185,43 +185,54 @@ class MediaHandler {
/// Dispatch the stream data to downstream handlers. Note that
/// stream_data.stream_index should be the output stream index.
Status Dispatch(std::unique_ptr<StreamData> stream_data);
Status Dispatch(std::unique_ptr<StreamData> stream_data) const;
/// Dispatch the stream info to downstream handlers.
Status DispatchStreamInfo(
size_t stream_index, std::shared_ptr<const StreamInfo> stream_info) {
return Dispatch(StreamData::FromStreamInfo(stream_index, stream_info));
size_t stream_index,
std::shared_ptr<const StreamInfo> stream_info) const {
return Dispatch(
StreamData::FromStreamInfo(stream_index, std::move(stream_info)));
}
/// Dispatch the media sample to downstream handlers.
Status DispatchMediaSample(
size_t stream_index, std::shared_ptr<const MediaSample> media_sample) {
return Dispatch(StreamData::FromMediaSample(stream_index, media_sample));
size_t stream_index,
std::shared_ptr<const MediaSample> media_sample) const {
return Dispatch(
StreamData::FromMediaSample(stream_index, std::move(media_sample)));
}
/// Dispatch the text sample to downstream handlers.
// DispatchTextSample should only be overridden for testing.
Status DispatchTextSample(
size_t stream_index, std::shared_ptr<const TextSample> text_sample) {
return Dispatch(StreamData::FromTextSample(stream_index, text_sample));
size_t stream_index,
std::shared_ptr<const TextSample> text_sample) const {
return Dispatch(
StreamData::FromTextSample(stream_index, std::move(text_sample)));
}
/// Dispatch the segment info to downstream handlers.
Status DispatchSegmentInfo(
size_t stream_index, std::shared_ptr<const SegmentInfo> segment_info) {
return Dispatch(StreamData::FromSegmentInfo(stream_index, segment_info));
size_t stream_index,
std::shared_ptr<const SegmentInfo> segment_info) const {
return Dispatch(
StreamData::FromSegmentInfo(stream_index, std::move(segment_info)));
}
/// Dispatch the scte35 event to downstream handlers.
Status DispatchScte35Event(size_t stream_index,
std::shared_ptr<const Scte35Event> scte35_event) {
return Dispatch(StreamData::FromScte35Event(stream_index, scte35_event));
Status DispatchScte35Event(
size_t stream_index,
std::shared_ptr<const Scte35Event> scte35_event) const {
return Dispatch(
StreamData::FromScte35Event(stream_index, std::move(scte35_event)));
}
/// Dispatch the cue event to downstream handlers.
Status DispatchCueEvent(size_t stream_index,
std::shared_ptr<const CueEvent> cue_event) {
return Dispatch(StreamData::FromCueEvent(stream_index, cue_event));
std::shared_ptr<const CueEvent> cue_event) const {
return Dispatch(
StreamData::FromCueEvent(stream_index, std::move(cue_event)));
}
/// Flush the downstream connected at the specified output stream index.
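
Since Dispatch() and the helpers above are now const, handlers can forward data from const methods (e.g. the new ChunkingHandler::EndSegmentIfStarted() const below). A minimal pass-through sketch, assuming InitializeInternal(), Process() and OnFlushRequest() are the hooks a MediaHandler subclass overrides (they are the ones ChunkingHandler overrides in this commit); the class name is illustrative:

class PassThroughHandler : public MediaHandler {
 protected:
  Status InitializeInternal() override { return Status::OK; }
  Status Process(std::unique_ptr<StreamData> stream_data) override {
    // stream_data->stream_index is already the output stream index.
    return Dispatch(std::move(stream_data));
  }
  Status OnFlushRequest(size_t input_stream_index) override {
    return FlushDownstream(input_stream_index);
  }
};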

View File

@@ -9,46 +9,38 @@
#include <algorithm>
#include "packager/base/logging.h"
#include "packager/base/threading/platform_thread.h"
#include "packager/media/base/media_sample.h"
namespace shaka {
namespace media {
namespace {
int64_t kThreadIdUnset = -1;
const size_t kStreamIndex = 0;
} // namespace
ChunkingHandler::ChunkingHandler(const ChunkingParams& chunking_params)
: chunking_params_(chunking_params),
thread_id_(kThreadIdUnset),
media_sample_comparator_(this),
cached_media_sample_stream_data_(media_sample_comparator_) {
: chunking_params_(chunking_params) {
CHECK_NE(chunking_params.segment_duration_in_seconds, 0u);
}
ChunkingHandler::~ChunkingHandler() {}
Status ChunkingHandler::InitializeInternal() {
segment_info_.resize(num_input_streams());
subsegment_info_.resize(num_input_streams());
time_scales_.resize(num_input_streams());
last_sample_end_timestamps_.resize(num_input_streams());
num_cached_samples_.resize(num_input_streams());
if (num_input_streams() != 1 || next_output_stream_index() != 1) {
return Status(error::INVALID_ARGUMENT,
"Expects exactly one input and one output.");
}
return Status::OK;
}
Status ChunkingHandler::Process(std::unique_ptr<StreamData> stream_data) {
switch (stream_data->stream_data_type) {
case StreamDataType::kStreamInfo:
return OnStreamInfo(stream_data->stream_index, stream_data->stream_info);
case StreamDataType::kScte35Event:
return OnScte35Event(stream_data->stream_index,
stream_data->scte35_event);
return OnStreamInfo(std::move(stream_data->stream_info));
case StreamDataType::kCueEvent:
return OnCueEvent(std::move(stream_data->cue_event));
case StreamDataType::kSegmentInfo:
VLOG(3) << "Droppping existing segment info.";
return Status::OK;
case StreamDataType::kMediaSample:
return OnMediaSample(std::move(stream_data));
return OnMediaSample(std::move(stream_data->media_sample));
default:
VLOG(3) << "Stream data type "
<< static_cast<int>(stream_data->stream_data_type) << " ignored.";
@@ -57,289 +49,103 @@ Status ChunkingHandler::Process(std::unique_ptr<StreamData> stream_data) {
}
Status ChunkingHandler::OnFlushRequest(size_t input_stream_index) {
// Process all cached samples.
while (!cached_media_sample_stream_data_.empty()) {
Status status =
ProcessMediaSampleStreamData(*cached_media_sample_stream_data_.top());
Status status = EndSegmentIfStarted();
if (!status.ok())
return status;
--num_cached_samples_[cached_media_sample_stream_data_.top()->stream_index];
cached_media_sample_stream_data_.pop();
}
if (segment_info_[input_stream_index]) {
auto& segment_info = segment_info_[input_stream_index];
if (segment_info->start_timestamp != -1) {
segment_info->duration = last_sample_end_timestamps_[input_stream_index] -
segment_info->start_timestamp;
Status status =
DispatchSegmentInfo(input_stream_index, std::move(segment_info));
if (!status.ok())
return status;
}
}
const size_t output_stream_index = input_stream_index;
return FlushDownstream(output_stream_index);
return FlushDownstream(kStreamIndex);
}
Status ChunkingHandler::OnStreamInfo(uint64_t stream_index,
std::shared_ptr<const StreamInfo> info) {
// Make sure the inputs come from the same thread.
const int64_t thread_id =
static_cast<int64_t>(base::PlatformThread::CurrentId());
int64_t expected = kThreadIdUnset;
if (!thread_id_.compare_exchange_strong(expected, thread_id) &&
expected != thread_id) {
return Status(error::CHUNKING_ERROR,
"Inputs should come from the same thread.");
}
const auto time_scale = info->time_scale();
time_scales_[stream_index] = time_scale;
// The video stream is treated as the main stream. If there is only one
// stream, it is the main stream.
const bool is_main_stream =
main_stream_index_ == kInvalidStreamIndex &&
(info->stream_type() == kStreamVideo || num_input_streams() == 1);
if (is_main_stream) {
main_stream_index_ = stream_index;
Status ChunkingHandler::OnStreamInfo(std::shared_ptr<const StreamInfo> info) {
time_scale_ = info->time_scale();
segment_duration_ =
chunking_params_.segment_duration_in_seconds * time_scale;
chunking_params_.segment_duration_in_seconds * time_scale_;
subsegment_duration_ =
chunking_params_.subsegment_duration_in_seconds * time_scale;
} else if (info->stream_type() == kStreamVideo) {
return Status(error::CHUNKING_ERROR,
"Only one video stream is allowed per chunking handler.");
chunking_params_.subsegment_duration_in_seconds * time_scale_;
return DispatchStreamInfo(kStreamIndex, std::move(info));
}
return DispatchStreamInfo(stream_index, std::move(info));
}
Status ChunkingHandler::OnScte35Event(
uint64_t stream_index,
std::shared_ptr<const Scte35Event> event) {
if (stream_index == main_stream_index_) {
scte35_events_.push(std::move(event));
} else {
VLOG(3) << "Dropping scte35 event from non main stream.";
}
return Status::OK;
}
Status ChunkingHandler::OnMediaSample(std::unique_ptr<StreamData> stream_data) {
DCHECK_EQ(StreamDataType::kMediaSample, stream_data->stream_data_type);
const size_t stream_index = stream_data->stream_index;
DCHECK_NE(time_scales_[stream_index], 0u)
<< "kStreamInfo should arrive before kMediaSample";
if (stream_index != main_stream_index_ &&
!stream_data->media_sample->is_key_frame()) {
return Status(error::CHUNKING_ERROR,
"All non video samples should be key frames.");
}
// The streams are expected to be roughly synchronized, so we don't expect
// to see a lot of samples from one stream but no samples from another
// stream.
// The value is kind of arbitrary here. For a 24fps video, it is ~40s.
const size_t kMaxCachedSamplesPerStream = 1000u;
if (num_cached_samples_[stream_index] >= kMaxCachedSamplesPerStream) {
LOG(ERROR) << "Streams are not synchronized:";
for (size_t i = 0; i < num_cached_samples_.size(); ++i)
LOG(ERROR) << " [Stream " << i << "] " << num_cached_samples_[i];
return Status(error::CHUNKING_ERROR, "Streams are not synchronized.");
}
cached_media_sample_stream_data_.push(std::move(stream_data));
++num_cached_samples_[stream_index];
// If we have cached samples from every stream, the first sample in
// |cached_media_samples_stream_data_| is guaranteed to be the earliest
// sample. Extract and process that sample.
if (std::all_of(num_cached_samples_.begin(), num_cached_samples_.end(),
[](size_t num_samples) { return num_samples > 0; })) {
while (true) {
const size_t top_stream_index =
cached_media_sample_stream_data_.top()->stream_index;
Status status =
ProcessMediaSampleStreamData(*cached_media_sample_stream_data_.top());
Status ChunkingHandler::OnCueEvent(std::shared_ptr<const CueEvent> event) {
Status status = EndSegmentIfStarted();
if (!status.ok())
return status;
cached_media_sample_stream_data_.pop();
if (--num_cached_samples_[top_stream_index] == 0)
break;
}
}
return Status::OK;
// Force start new segment after cue event.
segment_start_time_ = base::nullopt;
return DispatchCueEvent(kStreamIndex, std::move(event));
}
Status ChunkingHandler::ProcessMainMediaSample(const MediaSample* sample) {
const bool is_key_frame = sample->is_key_frame();
Status ChunkingHandler::OnMediaSample(
std::shared_ptr<const MediaSample> sample) {
DCHECK_NE(time_scale_, 0u) << "kStreamInfo should arrive before kMediaSample";
const int64_t timestamp = sample->dts();
const int64_t time_scale = time_scales_[main_stream_index_];
const double dts_in_seconds = static_cast<double>(sample->dts()) / time_scale;
// Check if we need to terminate the current (sub)segment.
bool new_segment = false;
bool new_subsegment = false;
std::shared_ptr<CueEvent> cue_event;
if (is_key_frame || !chunking_params_.segment_sap_aligned) {
bool started_new_segment = false;
const bool can_start_new_segment =
sample->is_key_frame() || !chunking_params_.segment_sap_aligned;
if (can_start_new_segment) {
const int64_t segment_index = timestamp / segment_duration_;
if (segment_index != current_segment_index_) {
if (!segment_start_time_ || segment_index != current_segment_index_) {
current_segment_index_ = segment_index;
// Reset subsegment index.
current_subsegment_index_ = 0;
new_segment = true;
}
// We use 'while' instead of 'if' to make sure to pop off multiple SCTE35
// events that may be very close to each other.
while (!scte35_events_.empty() &&
scte35_events_.top()->start_time_in_seconds <= dts_in_seconds) {
// For simplicity, don't change |current_segment_index_|.
current_subsegment_index_ = 0;
new_segment = true;
cue_event = std::make_shared<CueEvent>();
cue_event->time_in_seconds =
static_cast<double>(sample->pts()) / time_scale;
cue_event->cue_data = scte35_events_.top()->cue_data;
LOG(INFO) << "Chunked at " << dts_in_seconds << " seconds for Ad Cue.";
scte35_events_.pop();
}
}
if (!new_segment && subsegment_duration_ > 0 &&
(is_key_frame || !chunking_params_.subsegment_sap_aligned)) {
const int64_t subsegment_index =
(timestamp - segment_info_[main_stream_index_]->start_timestamp) /
subsegment_duration_;
if (subsegment_index != current_subsegment_index_) {
current_subsegment_index_ = subsegment_index;
new_subsegment = true;
}
}
Status status;
if (new_segment) {
status.Update(DispatchSegmentInfoForAllStreams());
segment_info_[main_stream_index_]->start_timestamp = timestamp;
if (cue_event)
status.Update(DispatchCueEventForAllStreams(std::move(cue_event)));
}
if (subsegment_duration_ > 0 && (new_segment || new_subsegment)) {
status.Update(DispatchSubsegmentInfoForAllStreams());
subsegment_info_[main_stream_index_]->start_timestamp = timestamp;
}
return status;
}
Status ChunkingHandler::ProcessMediaSampleStreamData(
const StreamData& media_sample_stream_data) {
const size_t stream_index = media_sample_stream_data.stream_index;
const auto sample = std::move(media_sample_stream_data.media_sample);
if (stream_index == main_stream_index_) {
Status status = ProcessMainMediaSample(sample.get());
Status status = EndSegmentIfStarted();
if (!status.ok())
return status;
segment_start_time_ = timestamp;
subsegment_start_time_ = timestamp;
started_new_segment = true;
}
}
if (!started_new_segment && IsSubsegmentEnabled()) {
const bool can_start_new_subsegment =
sample->is_key_frame() || !chunking_params_.subsegment_sap_aligned;
if (can_start_new_subsegment) {
const int64_t subsegment_index =
(timestamp - segment_start_time_.value()) / subsegment_duration_;
if (subsegment_index != current_subsegment_index_) {
current_subsegment_index_ = subsegment_index;
Status status = EndSubsegmentIfStarted();
if (!status.ok())
return status;
subsegment_start_time_ = timestamp;
}
}
}
VLOG(3) << "Stream index: " << stream_index << " "
<< "Sample ts: " << sample->dts() << " "
<< " duration: " << sample->duration()
<< " scale: " << time_scales_[stream_index] << "\n"
<< " scale: " << time_scales_[main_stream_index_]
<< (segment_info_[stream_index] ? " dispatch " : " discard ");
VLOG(3) << "Sample ts: " << timestamp << " "
<< " duration: " << sample->duration() << " scale: " << time_scale_
<< (segment_start_time_ ? " dispatch " : " discard ");
// Discard samples before segment start. If the segment has started,
// |segment_info_[stream_index]| won't be null.
if (!segment_info_[stream_index])
// |segment_start_time_| won't be null.
if (!segment_start_time_)
return Status::OK;
if (segment_info_[stream_index]->start_timestamp == -1)
segment_info_[stream_index]->start_timestamp = sample->dts();
if (subsegment_info_[stream_index] &&
subsegment_info_[stream_index]->start_timestamp == -1) {
subsegment_info_[stream_index]->start_timestamp = sample->dts();
}
last_sample_end_timestamps_[stream_index] =
sample->dts() + sample->duration();
return DispatchMediaSample(stream_index, std::move(sample));
last_sample_end_timestamp_ = timestamp + sample->duration();
return DispatchMediaSample(kStreamIndex, std::move(sample));
}
Status ChunkingHandler::DispatchSegmentInfoForAllStreams() {
Status status;
for (size_t i = 0; i < segment_info_.size() && status.ok(); ++i) {
if (segment_info_[i] && segment_info_[i]->start_timestamp != -1) {
segment_info_[i]->duration =
last_sample_end_timestamps_[i] - segment_info_[i]->start_timestamp;
status.Update(DispatchSegmentInfo(i, std::move(segment_info_[i])));
}
segment_info_[i].reset(new SegmentInfo);
subsegment_info_[i].reset();
}
return status;
Status ChunkingHandler::EndSegmentIfStarted() const {
if (!segment_start_time_)
return Status::OK;
auto segment_info = std::make_shared<SegmentInfo>();
segment_info->start_timestamp = segment_start_time_.value();
segment_info->duration =
last_sample_end_timestamp_ - segment_start_time_.value();
return DispatchSegmentInfo(kStreamIndex, std::move(segment_info));
}
Status ChunkingHandler::DispatchSubsegmentInfoForAllStreams() {
Status status;
for (size_t i = 0; i < subsegment_info_.size() && status.ok(); ++i) {
if (subsegment_info_[i] && subsegment_info_[i]->start_timestamp != -1) {
subsegment_info_[i]->duration =
last_sample_end_timestamps_[i] - subsegment_info_[i]->start_timestamp;
status.Update(DispatchSegmentInfo(i, std::move(subsegment_info_[i])));
}
subsegment_info_[i].reset(new SegmentInfo);
subsegment_info_[i]->is_subsegment = true;
}
return status;
}
Status ChunkingHandler::EndSubsegmentIfStarted() const {
if (!subsegment_start_time_)
return Status::OK;
Status ChunkingHandler::DispatchCueEventForAllStreams(
std::shared_ptr<CueEvent> cue_event) {
Status status;
for (size_t i = 0; i < segment_info_.size() && status.ok(); ++i) {
status.Update(DispatchCueEvent(i, cue_event));
}
return status;
}
ChunkingHandler::MediaSampleTimestampGreater::MediaSampleTimestampGreater(
const ChunkingHandler* const chunking_handler)
: chunking_handler_(chunking_handler) {}
bool ChunkingHandler::MediaSampleTimestampGreater::operator()(
const std::unique_ptr<StreamData>& lhs,
const std::unique_ptr<StreamData>& rhs) const {
DCHECK(lhs);
DCHECK(rhs);
return GetSampleTimeInSeconds(*lhs) > GetSampleTimeInSeconds(*rhs);
}
double ChunkingHandler::MediaSampleTimestampGreater::GetSampleTimeInSeconds(
const StreamData& media_sample_stream_data) const {
const size_t stream_index = media_sample_stream_data.stream_index;
const auto& sample = media_sample_stream_data.media_sample;
DCHECK(sample);
// Order main samples by left boundary and non main samples by mid-point. This
// ensures non main samples are properly chunked, i.e. if the portion of the
// sample in the next chunk is bigger than the portion of the sample in the
// previous chunk, the sample is placed in the next chunk.
const uint64_t timestamp =
stream_index == chunking_handler_->main_stream_index_
? sample->dts()
: (sample->dts() + sample->duration() / 2);
return static_cast<double>(timestamp) /
chunking_handler_->time_scales_[stream_index];
}
bool ChunkingHandler::Scte35EventTimestampGreater::operator()(
const std::shared_ptr<const Scte35Event>& lhs,
const std::shared_ptr<const Scte35Event>& rhs) const {
DCHECK(lhs);
DCHECK(rhs);
return lhs->start_time_in_seconds > rhs->start_time_in_seconds;
auto subsegment_info = std::make_shared<SegmentInfo>();
subsegment_info->start_timestamp = subsegment_start_time_.value();
subsegment_info->duration =
last_sample_end_timestamp_ - subsegment_start_time_.value();
subsegment_info->is_subsegment = true;
return DispatchSegmentInfo(kStreamIndex, std::move(subsegment_info));
}
} // namespace media
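
The single-stream chunking decision now reduces to integer division of the sample timestamp by the segment duration in the stream's timescale. A standalone sketch of that arithmetic, using values from the updated goldens (44100 Hz audio, 1 s segments, keyframes at t = 0, 45056 and 89088):

#include <cstdint>
#include <iostream>

int main() {
  const int64_t time_scale = 44100;                 // from the goldens
  const int64_t segment_duration = 1 * time_scale;  // 1 s in media timescale
  const int64_t timestamps[] = {0, 45056, 89088};   // new SegmentTimeline starts
  int64_t current_segment_index = -1;
  for (const int64_t dts : timestamps) {
    const int64_t segment_index = dts / segment_duration;
    if (segment_index != current_segment_index) {
      // Mirrors OnMediaSample(): end the started segment, begin a new one.
      std::cout << "segment " << segment_index << " starts at dts " << dts
                << "\n";
      current_segment_index = segment_index;
    }
  }
  return 0;
}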

View File

@@ -11,6 +11,7 @@
#include <queue>
#include "packager/base/logging.h"
#include "packager/base/optional.h"
#include "packager/media/base/media_handler.h"
#include "packager/media/public/chunking_params.h"
@@ -19,9 +20,7 @@ namespace media {
/// ChunkingHandler splits the samples into segments / subsegments based on the
/// specified chunking params.
/// This handler is a multi-in multi-out handler. If more than one input is
/// provided, there should be one and only one video stream; also, all inputs
/// should come from the same thread and are synchronized.
/// This handler is a one-in one-out handler.
/// There can be multiple chunking handlers running in different threads or even
/// different processes; we use the "consistent chunking algorithm" to make sure
/// the chunks in different streams are aligned without explicitly communicating
@@ -36,17 +35,11 @@ namespace media {
/// 2. Chunk only at the consistent chunkable boundary
///
/// This algorithm will make sure the chunks from different video streams are
/// aligned if they have aligned GoPs. However, this algorithm will only work
/// for video streams. To be able to chunk non video streams at similar
/// positions as video streams, ChunkingHandler is designed to accept one video
/// input and multiple non video inputs, the non video inputs are chunked when
/// the video input is chunked. If the inputs are synchronized - which is true
/// if the inputs come from the same demuxer, the video and non video chunks
/// are aligned.
/// aligned if they have aligned GoPs.
class ChunkingHandler : public MediaHandler {
public:
explicit ChunkingHandler(const ChunkingParams& chunking_params);
~ChunkingHandler() override;
~ChunkingHandler() override = default;
protected:
/// @name MediaHandler implementation overrides.
@@ -62,84 +55,34 @@ class ChunkingHandler : public MediaHandler {
ChunkingHandler(const ChunkingHandler&) = delete;
ChunkingHandler& operator=(const ChunkingHandler&) = delete;
Status OnStreamInfo(uint64_t stream_index,
std::shared_ptr<const StreamInfo> info);
Status OnScte35Event(uint64_t stream_index,
std::shared_ptr<const Scte35Event> event);
Status OnStreamInfo(std::shared_ptr<const StreamInfo> info);
Status OnCueEvent(std::shared_ptr<const CueEvent> event);
Status OnMediaSample(std::shared_ptr<const MediaSample> sample);
Status OnMediaSample(std::unique_ptr<StreamData> stream_data);
Status EndSegmentIfStarted() const;
Status EndSubsegmentIfStarted() const;
// Processes main media sample and apply chunking if needed.
Status ProcessMainMediaSample(const MediaSample* sample);
// Processes and dispatches media sample.
Status ProcessMediaSampleStreamData(const StreamData& media_data);
// The (sub)segments are aligned and dispatched together.
Status DispatchSegmentInfoForAllStreams();
Status DispatchSubsegmentInfoForAllStreams();
Status DispatchCueEventForAllStreams(std::shared_ptr<CueEvent> cue_event);
bool IsSubsegmentEnabled() {
return subsegment_duration_ > 0 &&
subsegment_duration_ != segment_duration_;
}
const ChunkingParams chunking_params_;
// The inputs are expected to come from the same thread.
std::atomic<int64_t> thread_id_;
// The video stream is the main stream; if there is only one stream, it is the
// main stream. The chunking is based on the main stream.
const size_t kInvalidStreamIndex = static_cast<size_t>(-1);
size_t main_stream_index_ = kInvalidStreamIndex;
// Segment and subsegment duration in main stream's time scale.
// Segment and subsegment duration in stream's time scale.
int64_t segment_duration_ = 0;
int64_t subsegment_duration_ = 0;
class MediaSampleTimestampGreater {
public:
explicit MediaSampleTimestampGreater(
const ChunkingHandler* const chunking_handler);
// Comparison operator. Used by |media_samples_| priority queue below to
// sort the media samples.
bool operator()(const std::unique_ptr<StreamData>& lhs,
const std::unique_ptr<StreamData>& rhs) const;
private:
double GetSampleTimeInSeconds(
const StreamData& media_sample_stream_data) const;
const ChunkingHandler* const chunking_handler_ = nullptr;
};
MediaSampleTimestampGreater media_sample_comparator_;
// Caches media samples and sort the samples.
std::priority_queue<std::unique_ptr<StreamData>,
std::vector<std::unique_ptr<StreamData>>,
MediaSampleTimestampGreater>
cached_media_sample_stream_data_;
// Tracks number of cached samples in input streams.
std::vector<size_t> num_cached_samples_;
// Current segment index, useful to determine where to do chunking.
int64_t current_segment_index_ = -1;
// Current subsegment index, useful to determine where to do chunking.
int64_t current_subsegment_index_ = -1;
std::vector<std::shared_ptr<SegmentInfo>> segment_info_;
std::vector<std::shared_ptr<SegmentInfo>> subsegment_info_;
std::vector<uint32_t> time_scales_;
base::Optional<int64_t> segment_start_time_;
base::Optional<int64_t> subsegment_start_time_;
uint32_t time_scale_ = 0;
// The end timestamp of the last dispatched sample.
std::vector<int64_t> last_sample_end_timestamps_;
struct Scte35EventTimestampGreater {
bool operator()(const std::shared_ptr<const Scte35Event>& lhs,
const std::shared_ptr<const Scte35Event>& rhs) const;
};
// Captures all incoming SCTE35 events to identify chunking points. Events
// will be removed from this queue one at a time as soon as the correct
// chunking point is identified in the incoming samples.
std::priority_queue<std::shared_ptr<const Scte35Event>,
std::vector<std::shared_ptr<const Scte35Event>>,
Scte35EventTimestampGreater>
scte35_events_;
int64_t last_sample_end_timestamp_ = 0;
};
} // namespace media
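
To see why the consistent chunking algorithm keeps independently chunked streams aligned: each handler divides timestamps by the segment duration expressed in its own timescale, so chunkable boundaries land on the same wall-clock seconds in every stream. A small illustration; the timescales come from the goldens, while the keyframe timestamps are made up for the example:

#include <cstdint>
#include <iostream>

int main() {
  const int64_t kAudioScale = 44100;  // ticks per second, audio
  const int64_t kVideoScale = 30000;  // ticks per second, video
  // First keyframe at or after the 2 s mark in each stream's own timescale.
  const int64_t audio_dts = 89088;  // 89088 / 44100 -> segment index 2
  const int64_t video_dts = 60060;  // 60060 / 30000 -> segment index 2
  std::cout << "audio chunks at segment index " << audio_dts / kAudioScale
            << "\n";
  std::cout << "video chunks at segment index " << video_dts / kVideoScale
            << "\n";
  return 0;
}

Both streams cut at the same one-second boundary without any coordination between handlers.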

View File

@@ -18,12 +18,10 @@ using ::testing::IsEmpty;
namespace shaka {
namespace media {
namespace {
const size_t kStreamIndex0 = 0;
const size_t kStreamIndex1 = 1;
const size_t kStreamIndex = 0;
const uint32_t kTimeScale0 = 800;
const uint32_t kTimeScale1 = 1000;
const int64_t kDuration0 = 200;
const int64_t kDuration1 = 300;
const int64_t kDuration = 300;
const bool kKeyFrame = true;
const bool kIsSubsegment = true;
const bool kEncrypted = true;
@@ -57,34 +55,34 @@ TEST_F(ChunkingHandlerTest, AudioNoSubsegmentsThenFlush) {
SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
kStreamIndex, GetAudioStreamInfo(kTimeScale0))));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted)));
ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale0, !kEncrypted)));
for (int i = 0; i < 5; ++i) {
ClearOutputStreamDataVector();
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
kStreamIndex, GetMediaSample(i * kDuration, kDuration, kKeyFrame))));
// One output stream_data except when i == 3, which also has SegmentInfo.
if (i == 3) {
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 3,
ElementsAre(IsSegmentInfo(kStreamIndex, 0, kDuration * 3,
!kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex0, i * kDuration1,
kDuration1, !kEncrypted)));
IsMediaSample(kStreamIndex, i * kDuration,
kDuration, !kEncrypted)));
} else {
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsMediaSample(kStreamIndex0, i * kDuration1,
kDuration1, !kEncrypted)));
ElementsAre(IsMediaSample(kStreamIndex, i * kDuration,
kDuration, !kEncrypted)));
}
}
ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex0));
ASSERT_OK(OnFlushRequest(kStreamIndex));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(IsSegmentInfo(kStreamIndex0, kDuration1 * 3, kDuration1 * 2,
ElementsAre(IsSegmentInfo(kStreamIndex, kDuration * 3, kDuration * 2,
!kIsSubsegment, !kEncrypted)));
}
@@ -95,25 +93,24 @@ TEST_F(ChunkingHandlerTest, AudioWithSubsegments) {
SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
kStreamIndex, GetAudioStreamInfo(kTimeScale0))));
for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
kStreamIndex, GetMediaSample(i * kDuration, kDuration, kKeyFrame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted),
IsMediaSample(kStreamIndex0, 0, kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kDuration1, kDuration1, !kEncrypted),
IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 2, kIsSubsegment,
IsStreamInfo(kStreamIndex, kTimeScale0, !kEncrypted),
IsMediaSample(kStreamIndex, 0, kDuration, !kEncrypted),
IsMediaSample(kStreamIndex, kDuration, kDuration, !kEncrypted),
IsSegmentInfo(kStreamIndex, 0, kDuration * 2, kIsSubsegment,
!kEncrypted),
IsMediaSample(kStreamIndex0, 2 * kDuration1, kDuration1, !kEncrypted),
IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 3, !kIsSubsegment,
IsMediaSample(kStreamIndex, 2 * kDuration, kDuration, !kEncrypted),
IsSegmentInfo(kStreamIndex, 0, kDuration * 3, !kIsSubsegment,
!kEncrypted),
IsMediaSample(kStreamIndex0, 3 * kDuration1, kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, 4 * kDuration1, kDuration1,
!kEncrypted)));
IsMediaSample(kStreamIndex, 3 * kDuration, kDuration, !kEncrypted),
IsMediaSample(kStreamIndex, 4 * kDuration, kDuration, !kEncrypted)));
}
TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
@ -123,193 +120,79 @@ TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale1))));
kStreamIndex, GetVideoStreamInfo(kTimeScale1))));
const int64_t kVideoStartTimestamp = 12345;
for (int i = 0; i < 6; ++i) {
// Every other sample is a key frame.
const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
kDuration1, is_key_frame))));
kStreamIndex, GetMediaSample(kVideoStartTimestamp + i * kDuration,
kDuration, is_key_frame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsStreamInfo(kStreamIndex0, kTimeScale1, !kEncrypted),
IsStreamInfo(kStreamIndex, kTimeScale1, !kEncrypted),
// The first sample @ kVideoStartTimestamp is discarded - not a key frame.
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration,
kDuration, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 2,
kDuration, !kEncrypted),
// The next segment boundary is crossed: 13245 / 1000 != 12645 / 1000.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp + kDuration1,
kDuration1 * 2, !kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 3,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 4,
kDuration1, !kEncrypted),
// The subsegment has duration kDuration1 * 2 since it can only
IsSegmentInfo(kStreamIndex, kVideoStartTimestamp + kDuration,
kDuration * 2, !kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 3,
kDuration, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 4,
kDuration, !kEncrypted),
// The subsegment has duration kDuration * 2 since it can only
// terminate before a key frame.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 3,
kDuration1 * 2, kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 5,
kDuration1, !kEncrypted)));
IsSegmentInfo(kStreamIndex, kVideoStartTimestamp + kDuration * 3,
kDuration * 2, kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 5,
kDuration, !kEncrypted)));
}
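Two extra rules shape the expectations in this test: samples before the first key frame are dropped, and a video (sub)segment may only be cut immediately before a key frame. The boundary comment 13245 / 1000 != 12645 / 1000 suggests an integer-division check along these lines (the exact form is an assumption):

#include <cassert>
#include <cstdint>

// True when |pts| falls in a later 1-second segment than |prev_pts| for a
// stream with the given |timescale| (1000 for kTimeScale1).
bool CrossesSegmentBoundary(int64_t prev_pts, int64_t pts, int64_t timescale) {
  return pts / timescale != prev_pts / timescale;
}

int main() {
  // 12645 / 1000 == 12 but 13245 / 1000 == 13, so the boundary is crossed;
  // 13245 is also a key frame, so the segment can be cut exactly there.
  assert(CrossesSegmentBoundary(12645, 13245, 1000));
  // 12945 stays in the same segment as 12645.
  assert(!CrossesSegmentBoundary(12645, 12945, 1000));
}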
TEST_F(ChunkingHandlerTest, AudioAndVideo) {
ChunkingParams chunking_params;
chunking_params.segment_duration_in_seconds = 1;
chunking_params.subsegment_duration_in_seconds = 0.3;
SetUpChunkingHandler(2, chunking_params);
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex1, GetVideoStreamInfo(kTimeScale1))));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted),
IsStreamInfo(kStreamIndex1, kTimeScale1, !kEncrypted)));
ClearOutputStreamDataVector();
// Equivalent to 12345 in video timescale.
const int64_t kAudioStartTimestamp = 9876;
const int64_t kVideoStartTimestamp = 12345;
// Burst of audio and video samples. They will be properly ordered.
for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(kAudioStartTimestamp + kDuration0 * i,
kDuration0, true))));
}
for (int i = 0; i < 5; ++i) {
// Every other sample is a key frame.
const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex1, GetMediaSample(kVideoStartTimestamp + kDuration1 * i,
kDuration1, is_key_frame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
// The first sample @ kVideoStartTimestamp is discarded - not a key frame.
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0,
kDuration0, !kEncrypted),
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 2,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 2,
kDuration0, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 3,
kDuration0, !kEncrypted),
// The audio segment is terminated together with the video stream.
IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0,
kDuration0 * 3, !kIsSubsegment, !kEncrypted),
// The next segment boundary is crossed: 13245 / 1000 != 12645 / 1000.
IsSegmentInfo(kStreamIndex1, kVideoStartTimestamp + kDuration1,
kDuration1 * 2, !kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
kDuration0, !kEncrypted)));
ClearOutputStreamDataVector();
// The side comments below show the equivalent timestamp in video timescale.
// The audio and video are made approximately aligned.
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(kAudioStartTimestamp + kDuration0 * 5, kDuration0,
true)))); // 13595
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex1,
GetMediaSample(kVideoStartTimestamp + kDuration1 * 5, kDuration1,
true)))); // 13845
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(kAudioStartTimestamp + kDuration0 * 6, kDuration0,
true)))); // 13845
// These expectations are separated from the ones above because
// ElementsAre supports at most 10 elements.
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 4,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 5,
kDuration0, !kEncrypted),
// Audio is terminated along with video below.
IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
kDuration0 * 2, kIsSubsegment, !kEncrypted),
// The subsegment has duration kDuration1 * 2 since it can only
// terminate before a key frame.
IsSegmentInfo(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
kDuration1 * 2, kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 5,
kDuration1, !kEncrypted)));
ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex0));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 6,
kDuration0, !kEncrypted),
IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
kDuration0 * 3, !kIsSubsegment, !kEncrypted)));
ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex1));
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsSegmentInfo(
kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
kDuration1 * 3, !kIsSubsegment, !kEncrypted)));
// Flushing again will do nothing.
ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex0));
ASSERT_OK(OnFlushRequest(kStreamIndex1));
EXPECT_THAT(GetOutputStreamDataVector(), IsEmpty());
}
TEST_F(ChunkingHandlerTest, Scte35Event) {
TEST_F(ChunkingHandlerTest, CueEvent) {
ChunkingParams chunking_params;
chunking_params.segment_duration_in_seconds = 1;
chunking_params.subsegment_duration_in_seconds = 0.5;
SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale1))));
kStreamIndex, GetVideoStreamInfo(kTimeScale1))));
const int64_t kVideoStartTimestamp = 12345;
const double kScte35TimeInSeconds =
static_cast<double>(kVideoStartTimestamp + kDuration1) / kTimeScale1;
const double kCueTimeInSeconds =
static_cast<double>(kVideoStartTimestamp + kDuration) / kTimeScale1;
auto scte35_event = std::make_shared<Scte35Event>();
scte35_event->start_time_in_seconds = kScte35TimeInSeconds;
ASSERT_OK(Process(StreamData::FromScte35Event(kStreamIndex0, scte35_event)));
auto cue_event = std::make_shared<CueEvent>();
cue_event->time_in_seconds = kCueTimeInSeconds;
for (int i = 0; i < 3; ++i) {
const bool is_key_frame = true;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
kDuration1, is_key_frame))));
kStreamIndex, GetMediaSample(kVideoStartTimestamp + i * kDuration,
kDuration, is_key_frame))));
if (i == 0) {
ASSERT_OK(Process(StreamData::FromCueEvent(kStreamIndex, cue_event)));
}
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsStreamInfo(kStreamIndex0, kTimeScale1, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp, kDuration1,
IsStreamInfo(kStreamIndex, kTimeScale1, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp, kDuration,
!kEncrypted),
// A new segment is created due to the existence of the cue.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp, kDuration1,
IsSegmentInfo(kStreamIndex, kVideoStartTimestamp, kDuration,
!kIsSubsegment, !kEncrypted),
IsCueEvent(kStreamIndex0, kScte35TimeInSeconds),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 1,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,
kDuration1, !kEncrypted)));
IsCueEvent(kStreamIndex, kCueTimeInSeconds),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 1,
kDuration, !kEncrypted),
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 2,
kDuration, !kEncrypted)));
}
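The cue placement here is exact: kCueTimeInSeconds = (12345 + 300) / 1000 = 12.645, i.e. the start of the second sample, so the handler closes the first one-sample segment and emits the CueEvent before the second sample. A hedged sketch of that decision rule (a simplification, not the handler's code):

#include <cassert>
#include <cstdint>

// A cue at |cue_seconds| forces a segment cut before the first sample whose
// start time reaches the cue.
bool CutBeforeSample(int64_t pts, int64_t timescale, double cue_seconds) {
  return static_cast<double>(pts) / timescale >= cue_seconds;
}

int main() {
  const double kCueTimeInSeconds = (12345.0 + 300.0) / 1000.0;  // 12.645
  assert(!CutBeforeSample(12345, 1000, kCueTimeInSeconds));  // first sample
  assert(CutBeforeSample(12645, 1000, kCueTimeInSeconds));   // second sample
}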
} // namespace media

View File

@ -20,7 +20,18 @@ double TimeInSeconds(const StreamInfo& info, const StreamData& data) {
switch (data.stream_data_type) {
case StreamDataType::kMediaSample:
time_scale = info.time_scale();
if (info.stream_type() == kStreamAudio) {
// Return the start time for video and mid-point for audio, so that for
// an audio sample, if the portion of the sample after the cue point is
// bigger than the portion of the sample before the cue point, the
// sample is placed after the cue.
// It does not matter for text samples, as text samples will be cut at
// the cue point.
scaled_time =
data.media_sample->pts() + data.media_sample->duration() / 2;
} else {
scaled_time = data.media_sample->pts();
}
break;
case StreamDataType::kTextSample:
// Text is always in milliseconds, but the stream info time scale is 0.
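A worked example makes the midpoint rule concrete: an audio sample with pts = 100 and duration = 50 at timescale 1000 compares as (100 + 25) / 1000 = 0.125 s, so a cue at 0.110 s places the whole sample after the cue even though the sample starts before it. A simplified sketch of the rule, mirroring the branch above rather than the full TimeInSeconds:

#include <cassert>
#include <cstdint>

// Audio compares against the cue by sample midpoint; video by start time.
double ComparisonTimeInSeconds(int64_t pts, int64_t duration,
                               int64_t timescale, bool is_audio) {
  const int64_t scaled_time = is_audio ? pts + duration / 2 : pts;
  return static_cast<double>(scaled_time) / timescale;
}

int main() {
  const double cue = 0.110;
  // Audio: midpoint 0.125 s >= cue, so the sample lands after the cue.
  assert(ComparisonTimeInSeconds(100, 50, 1000, true) >= cue);
  // Video: start 0.100 s < cue, so the sample stays before the cue.
  assert(ComparisonTimeInSeconds(100, 50, 1000, false) < cue);
}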
@ -80,7 +91,7 @@ Status CueAlignmentHandler::OnFlushRequest(size_t stream_index) {
if (!stream_state.samples.empty()) {
LOG(WARNING) << "Unexpected data seen on stream " << i;
while (!stream_state.samples.empty()) {
Status status(Dispatch(std::move(stream_state.samples.front())));
Status status = Dispatch(std::move(stream_state.samples.front()));
if (!status.ok())
return status;
stream_state.samples.pop_front();
@ -141,7 +152,7 @@ Status CueAlignmentHandler::OnSample(std::unique_ptr<StreamData> sample) {
// Accept the sample. This will output it if it comes before the hint point or
// will cache it if it comes after the hint point.
Status status(AcceptSample(std::move(sample), &stream_state));
Status status = AcceptSample(std::move(sample), &stream_state);
if (!status.ok()) {
return status;
}
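The hint point mentioned above behaves like a provisional cue: samples timed before the hint are dispatched immediately, everything else is cached until the real cue time is known. A stand-in sketch of that per-stream state (types and names here are illustrative, not the handler's):

#include <deque>
#include <memory>

struct StreamData { double time_in_seconds = 0; };

class StreamStateSketch {
 public:
  explicit StreamStateSketch(double hint_in_seconds)
      : hint_in_seconds_(hint_in_seconds) {}

  // Returns true when the sample was dispatched immediately; false when it
  // was cached pending the actual cue.
  bool AcceptSample(std::unique_ptr<StreamData> sample) {
    if (sample->time_in_seconds < hint_in_seconds_) {
      // The real handler calls Dispatch(std::move(sample)) here.
      return true;
    }
    samples_.push_back(std::move(sample));
    return false;
  }

 private:
  double hint_in_seconds_;
  std::deque<std::unique_ptr<StreamData>> samples_;
};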
@ -180,6 +191,8 @@ Status CueAlignmentHandler::UseNewSyncPoint(
// No stream should be so out of sync with the others that they would
// still be working on an old cue.
if (stream_state.cue) {
// TODO(kqyang): Could this happen for text when there are no text samples
// between the two cues?
LOG(ERROR) << "Found two cue events that are too close together. One at "
<< stream_state.cue->time_in_seconds << " and the other at "
<< new_sync->time_in_seconds;
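The error above enforces an invariant rather than handling a normal case: by the time a new sync point is promoted, every stream should already have dispatched its pending cue, so a still-set stream_state.cue means two cues landed with no sample between them. A compact restatement of the check with simplified types:

#include <iostream>
#include <memory>

struct CueEvent { double time_in_seconds = 0; };

struct StreamState {
  std::shared_ptr<CueEvent> cue;  // Pending cue, not yet dispatched.
};

bool CanUseNewSyncPoint(const StreamState& state, const CueEvent& new_sync) {
  if (state.cue) {
    std::cerr << "Found two cue events that are too close together. One at "
              << state.cue->time_in_seconds << " and the other at "
              << new_sync.time_in_seconds << "\n";
    return false;
  }
  return true;
}

int main() {
  StreamState state;
  state.cue = std::make_shared<CueEvent>();
  state.cue->time_in_seconds = 10.0;
  CueEvent next;
  next.time_in_seconds = 10.2;
  return CanUseNewSyncPoint(state, next) ? 0 : 1;  // Fails: cue still pending.
}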

View File

@ -33,6 +33,7 @@
#include "packager/media/base/muxer_options.h"
#include "packager/media/base/muxer_util.h"
#include "packager/media/chunking/chunking_handler.h"
#include "packager/media/chunking/cue_alignment_handler.h"
#include "packager/media/crypto/encryption_handler.h"
#include "packager/media/demuxer/demuxer.h"
#include "packager/media/event/muxer_listener_factory.h"
@ -55,6 +56,7 @@ namespace shaka {
using media::Demuxer;
using media::KeySource;
using media::MuxerOptions;
using media::SyncPointQueue;
namespace media {
namespace {
@ -375,8 +377,15 @@ std::shared_ptr<MediaHandler> CreateEncryptionHandler(
Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
const PackagingParams& packaging_params,
std::unique_ptr<MuxerListener> muxer_listener,
SyncPointQueue* sync_points,
MuxerFactory* muxer_factory,
std::shared_ptr<OriginHandler>* root) {
// TODO(kqyang): This needs to be integrated back into the media pipeline,
// since we may want to get more than just text streams from the demuxer; in
// that case, the same demuxer should be used to get all streams instead of
// having a demuxer dedicated to text.
// TODO(kqyang): Support Cue Alignment if |sync_points| is not null.
Status status;
std::shared_ptr<Demuxer> demuxer;
@ -385,14 +394,15 @@ Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
demuxer->SetLanguageOverride(stream.stream_selector, stream.language);
}
std::shared_ptr<MediaHandler> chunker(
new ChunkingHandler(packaging_params.chunking_params));
auto chunker =
std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
std::shared_ptr<Muxer> muxer =
muxer_factory->CreateMuxer(GetOutputFormat(stream), stream);
muxer->SetMuxerListener(std::move(muxer_listener));
status.Update(chunker->AddHandler(std::move(muxer)));
status.Update(demuxer->SetHandler(stream.stream_selector, chunker));
status.Update(
demuxer->SetHandler(stream.stream_selector, std::move(chunker)));
return status;
}
@ -400,7 +410,10 @@ Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
Status CreateHlsTextJob(const StreamDescriptor& stream,
const PackagingParams& packaging_params,
std::unique_ptr<MuxerListener> muxer_listener,
SyncPointQueue* sync_points,
JobManager* job_manager) {
// TODO(kqyang): Support Cue Alignment if |sync_points| is not null.
DCHECK(muxer_listener);
DCHECK(job_manager);
@ -421,9 +434,8 @@ Status CreateHlsTextJob(const StreamDescriptor& stream,
MuxerOptions muxer_options = CreateMuxerOptions(stream, packaging_params);
muxer_options.bandwidth = stream.bandwidth ? stream.bandwidth : 256;
std::shared_ptr<WebVttSegmentedOutputHandler> output(
new WebVttSegmentedOutputHandler(muxer_options,
std::move(muxer_listener)));
auto output = std::make_shared<WebVttSegmentedOutputHandler>(
muxer_options, std::move(muxer_listener));
std::unique_ptr<FileReader> reader;
Status open_status = FileReader::Open(stream.input, &reader);
@ -431,10 +443,9 @@ Status CreateHlsTextJob(const StreamDescriptor& stream,
return open_status;
}
std::shared_ptr<OriginHandler> parser(
new WebVttParser(std::move(reader), stream.language));
std::shared_ptr<MediaHandler> segmenter(
new WebVttSegmenter(segment_length_in_ms));
auto parser =
std::make_shared<WebVttParser>(std::move(reader), stream.language);
auto segmenter = std::make_shared<WebVttSegmenter>(segment_length_in_ms);
// Build in reverse to allow us to move the pointers.
Status status;
@ -451,8 +462,11 @@ Status CreateHlsTextJob(const StreamDescriptor& stream,
Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
const PackagingParams& packaging_params,
std::unique_ptr<MuxerListener> muxer_listener,
SyncPointQueue* sync_points,
MuxerFactory* muxer_factory,
std::shared_ptr<OriginHandler>* root) {
// TODO(kqyang): Support Cue Alignment if |sync_points| is not null.
Status status;
std::unique_ptr<FileReader> reader;
status = FileReader::Open(stream.input, &reader);
@ -461,11 +475,11 @@ Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
return status;
}
std::shared_ptr<OriginHandler> parser(
new WebVttParser(std::move(reader), stream.language));
std::shared_ptr<MediaHandler> text_to_mp4(new WebVttToMp4Handler);
std::shared_ptr<MediaHandler> chunker(
new ChunkingHandler(packaging_params.chunking_params));
auto parser =
std::make_shared<WebVttParser>(std::move(reader), stream.language);
auto text_to_mp4 = std::make_shared<WebVttToMp4Handler>();
auto chunker =
std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
std::shared_ptr<Muxer> muxer =
muxer_factory->CreateMuxer(GetOutputFormat(stream), stream);
muxer->SetMuxerListener(std::move(muxer_listener));
@ -482,6 +496,7 @@ Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
Status CreateTextJobs(
const std::vector<std::reference_wrapper<const StreamDescriptor>>& streams,
const PackagingParams& packaging_params,
SyncPointQueue* sync_points,
MuxerListenerFactory* muxer_listener_factory,
MuxerFactory* muxer_factory,
MpdNotifier* mpd_notifier,
@ -498,14 +513,14 @@ Status CreateTextJobs(
switch (DetermineContainerFromFileName(stream.input)) {
case CONTAINER_WEBVTT:
status.Update(CreateWebVttToMp4TextJob(stream, packaging_params,
std::move(muxer_listener),
status.Update(CreateWebVttToMp4TextJob(
stream, packaging_params, std::move(muxer_listener), sync_points,
muxer_factory, &root));
break;
case CONTAINER_MOV:
status.Update(CreateMp4ToMp4TextJob(stream, packaging_params,
std::move(muxer_listener),
status.Update(CreateMp4ToMp4TextJob(
stream, packaging_params, std::move(muxer_listener), sync_points,
muxer_factory, &root));
break;
@ -543,8 +558,9 @@ Status CreateTextJobs(
// If we are outputting to HLS, then create the HLS text pipeline that
// will create segmented text output.
if (hls_listener) {
Status status = CreateHlsTextJob(stream, packaging_params,
std::move(hls_listener), job_manager);
Status status =
CreateHlsTextJob(stream, packaging_params, std::move(hls_listener),
sync_points, job_manager);
if (!status.ok()) {
return status;
}
@ -592,6 +608,7 @@ Status CreateAudioVideoJobs(
const std::vector<std::reference_wrapper<const StreamDescriptor>>& streams,
const PackagingParams& packaging_params,
KeySource* encryption_key_source,
SyncPointQueue* sync_points,
MuxerListenerFactory* muxer_listener_factory,
MuxerFactory* muxer_factory,
JobManager* job_manager) {
@ -601,14 +618,14 @@ Status CreateAudioVideoJobs(
// Demuxers are shared among all streams with the same input.
std::shared_ptr<Demuxer> demuxer;
// Chunkers can be shared among all streams with the same input (except for
// WVM files), which allows samples from the same input to be synced when
// doing chunking.
std::shared_ptr<MediaHandler> chunker;
bool is_wvm_file = false;
// When |sync_points| is not null, there should be one CueAlignmentHandler per
// input. All CueAlignmentHandlers share the same |sync_points|, which allows
// sync points / cues to be aligned across streams, whether they are from the
// same input or not.
std::shared_ptr<CueAlignmentHandler> cue_aligner;
// Replicators are shared among all streams with the same input and stream
// selector.
std::shared_ptr<MediaHandler> replicator;
std::shared_ptr<Replicator> replicator;
std::string previous_input;
std::string previous_selector;
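The comments above pin down the sharing topology: replicators are shared per (input, stream selector) pair, and cue aligners per input, with every aligner reading the same SyncPointQueue so cues line up across inputs. A sketch of the per-input sharing rule with stand-in types, not the packager's classes:

#include <map>
#include <memory>
#include <string>

struct SyncPointQueue {};  // Stand-in for the shared cue timeline.

struct CueAlignmentHandler {
  explicit CueAlignmentHandler(SyncPointQueue* sync_points)
      : sync_points(sync_points) {}
  SyncPointQueue* sync_points;
};

// One aligner per input; all aligners point at the same SyncPointQueue.
std::shared_ptr<CueAlignmentHandler> GetOrCreateAligner(
    std::map<std::string, std::shared_ptr<CueAlignmentHandler>>* aligners,
    const std::string& input, SyncPointQueue* sync_points) {
  auto& aligner = (*aligners)[input];
  if (!aligner)
    aligner = std::make_shared<CueAlignmentHandler>(sync_points);
  return aligner;
}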
@ -624,15 +641,8 @@ Status CreateAudioVideoJobs(
job_manager->Add("RemuxJob", demuxer);
// Share chunkers among all streams with the same input except for WVM
// file, which may contain multiple video files and the samples may not be
// interleaved either.
is_wvm_file =
DetermineContainerFromFileName(stream.input) == CONTAINER_WVM;
if (!is_wvm_file) {
chunker =
std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
}
if (sync_points)
cue_aligner = std::make_shared<CueAlignmentHandler>(sync_points);
}
if (!stream.language.empty()) {
@ -651,16 +661,8 @@ Status CreateAudioVideoJobs(
}
if (new_stream) {
std::shared_ptr<MediaHandler> ad_cue_generator;
if (!packaging_params.ad_cue_generator_params.cue_points.empty()) {
ad_cue_generator = std::make_shared<AdCueGenerator>(
packaging_params.ad_cue_generator_params);
}
if (is_wvm_file) {
chunker =
auto chunker =
std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
}
std::shared_ptr<MediaHandler> encryptor = CreateEncryptionHandler(
packaging_params, stream, encryption_key_source);
@ -668,10 +670,9 @@ Status CreateAudioVideoJobs(
replicator = std::make_shared<Replicator>();
Status status;
if (ad_cue_generator) {
status.Update(
demuxer->SetHandler(stream.stream_selector, ad_cue_generator));
status.Update(ad_cue_generator->AddHandler(chunker));
if (cue_aligner) {
status.Update(demuxer->SetHandler(stream.stream_selector, cue_aligner));
status.Update(cue_aligner->AddHandler(chunker));
} else {
status.Update(demuxer->SetHandler(stream.stream_selector, chunker));
}
@ -729,6 +730,7 @@ Status CreateAllJobs(const std::vector<StreamDescriptor>& stream_descriptors,
const PackagingParams& packaging_params,
MpdNotifier* mpd_notifier,
KeySource* encryption_key_source,
SyncPointQueue* sync_points,
MuxerListenerFactory* muxer_listener_factory,
MuxerFactory* muxer_factory,
JobManager* job_manager) {
@ -758,11 +760,11 @@ Status CreateAllJobs(const std::vector<StreamDescriptor>& stream_descriptors,
media::StreamDescriptorCompareFn);
Status status;
status.Update(CreateTextJobs(text_streams, packaging_params,
status.Update(CreateTextJobs(text_streams, packaging_params, sync_points,
muxer_listener_factory, muxer_factory,
mpd_notifier, job_manager));
status.Update(CreateAudioVideoJobs(
audio_video_streams, packaging_params, encryption_key_source,
audio_video_streams, packaging_params, encryption_key_source, sync_points,
muxer_listener_factory, muxer_factory, job_manager));
if (!status.ok()) {
@ -783,6 +785,7 @@ struct Packager::PackagerInternal {
std::unique_ptr<KeySource> encryption_key_source;
std::unique_ptr<MpdNotifier> mpd_notifier;
std::unique_ptr<hls::HlsNotifier> hls_notifier;
std::unique_ptr<SyncPointQueue> sync_points;
BufferCallbackParams buffer_callback_params;
media::JobManager job_manager;
};
@ -854,6 +857,11 @@ Status Packager::Initialize(
internal->hls_notifier.reset(new hls::SimpleHlsNotifier(hls_params));
}
if (!packaging_params.ad_cue_generator_params.cue_points.empty()) {
internal->sync_points.reset(
new SyncPointQueue(packaging_params.ad_cue_generator_params));
}
std::vector<StreamDescriptor> streams_for_jobs;
for (const StreamDescriptor& descriptor : stream_descriptors) {
@ -896,8 +904,8 @@ Status Packager::Initialize(
Status status = media::CreateAllJobs(
streams_for_jobs, packaging_params, internal->mpd_notifier.get(),
internal->encryption_key_source.get(), &muxer_listener_factory,
&muxer_factory, &internal->job_manager);
internal->encryption_key_source.get(), internal->sync_points.get(),
&muxer_listener_factory, &muxer_factory, &internal->job_manager);
if (!status.ok()) {
return status;