Integrate CueAlignmentHandler

Also changed ChunkingHandler to be a one-input, one-output handler.

Issue: #355
Change-Id: Ie98a96bcc0ddded347699c9f333f604826976d11

parent e685c8a63a
commit e1bb27f130
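As a rough sketch of the one-input/one-output contract this change enforces, a handler built on this model validates its topology in InitializeInternal() and forwards data on a single output stream. The class below is hypothetical (it is not the actual CueAlignmentHandler) and uses only MediaHandler methods that appear later in this diff:

// Hypothetical pass-through handler, for illustration only.
class PassThroughHandler : public MediaHandler {
 protected:
  Status InitializeInternal() override {
    // The same guard the reworked ChunkingHandler::InitializeInternal() adds.
    if (num_input_streams() != 1 || next_output_stream_index() != 1) {
      return Status(error::INVALID_ARGUMENT,
                    "Expects exactly one input and one output.");
    }
    return Status::OK;
  }

  Status Process(std::unique_ptr<StreamData> stream_data) override {
    // A real handler would switch on stream_data->stream_data_type and
    // rewrite samples, segment info, or cue events; this one forwards as-is.
    return Dispatch(std::move(stream_data));
  }

  Status OnFlushRequest(size_t input_stream_index) override {
    return FlushDownstream(/*output_stream_index=*/0);
  }
};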
@@ -806,8 +806,8 @@ class PackagerFunctionalTest(PackagerAppTest):
     self.assertPackageSuccess(
         self._GetStreams(['audio', 'video']),
         self._GetFlags(encryption=True, ad_cues='1.5'))
-    self._DiffGold(self.output[0], 'bear-640x360-a-cenc-golden.mp4')
-    self._DiffGold(self.output[1], 'bear-640x360-v-cenc-golden.mp4')
+    self._DiffGold(self.output[0], 'bear-640x360-a-cenc-ad_cues-golden.mp4')
+    self._DiffGold(self.output[1], 'bear-640x360-v-cenc-ad_cues-golden.mp4')
     self._DiffGold(self.mpd_output, 'bear-640x360-av-cenc-ad_cues-golden.mpd')
     self._VerifyDecryption(self.output[0], 'bear-640x360-a-demuxed-golden.mp4')
     self._VerifyDecryption(self.output[1], 'bear-640x360-v-golden.mp4')

@@ -1088,10 +1088,10 @@ class PackagerFunctionalTest(PackagerAppTest):
     self.assertPackageSuccess(
         self._GetStreams(['audio', 'video'], hls=True),
         self._GetFlags(encryption=True, output_hls=True, ad_cues='1.5'))
-    self._DiffGold(self.output[0], 'bear-640x360-a-cenc-golden.mp4')
-    self._DiffGold(self.output[1], 'bear-640x360-v-cenc-golden.mp4')
+    self._DiffGold(self.output[0], 'bear-640x360-a-cenc-ad_cues-golden.mp4')
+    self._DiffGold(self.output[1], 'bear-640x360-v-cenc-ad_cues-golden.mp4')
     self._DiffGold(self.hls_master_playlist_output,
-                   'bear-640x360-av-mp4-master-cenc-golden.m3u8')
+                   'bear-640x360-av-mp4-master-cenc-ad_cues-golden.m3u8')
     self._DiffGold(
         os.path.join(self.tmp_dir, 'audio.m3u8'),
         'bear-640x360-a-mp4-cenc-ad_cues-golden.m3u8')

@@ -1146,8 +1146,8 @@ class PackagerFunctionalTest(PackagerAppTest):
     self.assertPackageSuccess(
         self._GetStreams(['audio', 'video'], live=True),
         self._GetFlags(generate_static_mpd=True, ad_cues='1.5'))
-    self._DiffLiveGold(self.output[0], 'bear-640x360-a-live-golden')
-    self._DiffLiveGold(self.output[1], 'bear-640x360-v-live-golden')
+    self._DiffLiveGold(self.output[0], 'bear-640x360-a-live-ad_cues-golden')
+    self._DiffLiveGold(self.output[1], 'bear-640x360-v-live-ad_cues-golden')
     self._DiffGold(self.mpd_output,
                    'bear-640x360-av-live-static-ad_cues-golden.mpd')
3 binary files not shown.
@@ -7,7 +7,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="0" bandwidth="79930" codecs="opus" mimeType="audio/mp4" audioSamplingRate="48000">
+      <Representation id="0" bandwidth="81568" codecs="opus" mimeType="audio/mp4" audioSamplingRate="48000">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="975-1042" timescale="1000000">

@@ -3,7 +3,7 @@
 <MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.736S">
   <Period id="0">
     <AdaptationSet id="0" contentType="audio" subsegmentAlignment="true">
-      <Representation id="0" bandwidth="75444" codecs="opus" mimeType="audio/webm" audioSamplingRate="48000">
+      <Representation id="0" bandwidth="76531" codecs="opus" mimeType="audio/webm" audioSamplingRate="48000">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.webm</BaseURL>
         <SegmentBase indexRange="323-371" timescale="1000000">
4 binary files not shown.
@@ -1,4 +1,4 @@
-bandwidth: 129185
+bandwidth: 129162
 audio_info {
   codec: "mp4a.40.2"
   sampling_frequency: 44100
6 binary files not shown.
@@ -3,12 +3,12 @@
 ## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
 #EXT-X-TARGETDURATION:2
 #EXT-X-PLAYLIST-TYPE:VOD
-#EXTINF:0.952,
+#EXTINF:0.975,
 output_audio-1.ts
 #EXT-X-DISCONTINUITY
 #EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",IV=0x3334353637383930,KEYFORMAT="identity"
 #EXTINF:0.998,
 output_audio-2.ts
-#EXTINF:0.813,
+#EXTINF:0.789,
 output_audio-3.ts
 #EXT-X-ENDLIST
3 binary files not shown.
@@ -3,9 +3,9 @@
 ## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
 #EXT-X-TARGETDURATION:2
 #EXT-X-PLAYLIST-TYPE:EVENT
-#EXTINF:0.952,
+#EXTINF:0.975,
 output_audio-1.ts
 #EXTINF:0.998,
 output_audio-2.ts
-#EXTINF:0.813,
+#EXTINF:0.789,
 output_audio-3.ts

@@ -3,12 +3,12 @@
 ## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
 #EXT-X-TARGETDURATION:2
 #EXT-X-PLAYLIST-TYPE:VOD
-#EXTINF:0.952,
+#EXTINF:0.975,
 output_audio-1.ts
 #EXT-X-DISCONTINUITY
 #EXT-X-KEY:METHOD=SAMPLE-AES,URI="skd://www.license.com/getkey?KeyId=31323334-3536-3738-3930-313233343536",KEYFORMATVERSIONS="1",KEYFORMAT="com.apple.streamingkeydelivery"
 #EXTINF:0.998,
 output_audio-2.ts
-#EXTINF:0.813,
+#EXTINF:0.789,
 output_audio-3.ts
 #EXT-X-ENDLIST
3 binary files not shown.
@@ -3,10 +3,10 @@
 ## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
 #EXT-X-TARGETDURATION:2
 #EXT-X-PLAYLIST-TYPE:VOD
-#EXTINF:0.952,
+#EXTINF:0.975,
 output_audio-1.ts
 #EXTINF:0.998,
 output_audio-2.ts
-#EXTINF:0.813,
+#EXTINF:0.789,
 output_audio-3.ts
 #EXT-X-ENDLIST
14 binary files not shown.
@@ -4,9 +4,9 @@
 #EXT-X-TARGETDURATION:2
 #EXT-X-MEDIA-SEQUENCE:1
 #EXT-X-DISCONTINUITY-SEQUENCE:1
-#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",IV=0x3334353637383930,KEYFORMAT="identity"
+#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MjM0NTY3ODkwMTIzNDU2MQ==",IV=0x3334353637383930,KEYFORMAT="identity"
 #EXTINF:0.998,
 output_audio-2.ts
-#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MjM0NTY3ODkwMTIzNDU2MQ==",IV=0x3334353637383930,KEYFORMAT="identity"
-#EXTINF:0.813,
+#EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MzQ1Njc4OTAxMjM0NTYxMg==",IV=0x3334353637383930,KEYFORMAT="identity"
+#EXTINF:0.789,
 output_audio-3.ts

@@ -5,5 +5,5 @@
 #EXT-X-MEDIA-SEQUENCE:1
 #EXTINF:0.998,
 output_audio-2.ts
-#EXTINF:0.813,
+#EXTINF:0.789,
 output_audio-3.ts
@@ -5,14 +5,17 @@
 #EXT-X-PLAYLIST-TYPE:VOD
 #EXT-X-MAP:URI="output_audio.mp4",BYTERANGE="967@0"
 #EXT-X-KEY:METHOD=SAMPLE-AES-CTR,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",KEYFORMAT="identity"
-#EXTINF:0.998,
-#EXT-X-BYTERANGE:16279@1035
+#EXTINF:1.022,
+#EXT-X-BYTERANGE:16655@1047
 output_audio.mp4
 #EXTINF:0.998,
-#EXT-X-BYTERANGE:16674
+#EXT-X-BYTERANGE:16650
+output_audio.mp4
+#EXTINF:0.046,
+#EXT-X-BYTERANGE:1014
 output_audio.mp4
 #EXT-X-PLACEMENT-OPPORTUNITY
-#EXTINF:0.766,
-#EXT-X-BYTERANGE:10632
+#EXTINF:0.697,
+#EXT-X-BYTERANGE:9415
 output_audio.mp4
 #EXT-X-ENDLIST

@@ -5,13 +5,13 @@
 #EXT-X-PLAYLIST-TYPE:VOD
 #EXT-X-MAP:URI="output_audio.mp4",BYTERANGE="967@0"
 #EXT-X-KEY:METHOD=SAMPLE-AES-CTR,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",KEYFORMAT="identity"
-#EXTINF:0.998,
-#EXT-X-BYTERANGE:16279@1035
+#EXTINF:1.022,
+#EXT-X-BYTERANGE:16655@1035
 output_audio.mp4
 #EXTINF:0.998,
-#EXT-X-BYTERANGE:16674
+#EXT-X-BYTERANGE:16650
 output_audio.mp4
-#EXTINF:0.766,
-#EXT-X-BYTERANGE:10632
+#EXTINF:0.743,
+#EXT-X-BYTERANGE:10272
 output_audio.mp4
 #EXT-X-ENDLIST

@@ -4,10 +4,10 @@
 #EXT-X-TARGETDURATION:2
 #EXT-X-PLAYLIST-TYPE:VOD
 #EXT-X-MAP:URI="audio-init.mp4"
-#EXTINF:0.998,
+#EXTINF:1.022,
 audio-1.m4s
 #EXTINF:0.998,
 audio-2.m4s
-#EXTINF:0.766,
+#EXTINF:0.743,
 audio-3.m4s
 #EXT-X-ENDLIST
4 binary files not shown.
@@ -7,8 +7,8 @@
 output_audio-1.ts
 #EXT-X-DISCONTINUITY
 #EXT-X-KEY:METHOD=SAMPLE-AES,URI="data:text/plain;base64,MTIzNDU2Nzg5MDEyMzQ1Ng==",IV=0x3334353637383930,KEYFORMAT="identity"
-#EXTINF:0.975,
+#EXTINF:1.010,
 output_audio-2.ts
-#EXTINF:0.836,
+#EXTINF:0.801,
 output_audio-3.ts
 #EXT-X-ENDLIST
3 binary files not shown.
@@ -5,8 +5,8 @@
 #EXT-X-PLAYLIST-TYPE:VOD
 #EXTINF:0.975,
 output_audio-1.ts
-#EXTINF:0.975,
+#EXTINF:1.010,
 output_audio-2.ts
-#EXTINF:0.836,
+#EXTINF:0.801,
 output_audio-3.ts
 #EXT-X-ENDLIST

@@ -7,10 +7,10 @@
 #EXTINF:0.975,
 #EXT-X-BYTERANGE:23728@794
 output_audio.mp4
-#EXTINF:0.975,
-#EXT-X-BYTERANGE:23730
+#EXTINF:1.010,
+#EXT-X-BYTERANGE:24574
 output_audio.mp4
-#EXTINF:0.836,
-#EXT-X-BYTERANGE:20354
+#EXTINF:0.801,
+#EXT-X-BYTERANGE:19510
 output_audio.mp4
 #EXT-X-ENDLIST

@@ -3,5 +3,5 @@
 
 #EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
 
-#EXT-X-STREAM-INF:BANDWIDTH=1242703,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
+#EXT-X-STREAM-INF:BANDWIDTH=1242861,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
 video.m3u8

@@ -3,5 +3,5 @@
 
 #EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
 
-#EXT-X-STREAM-INF:BANDWIDTH=1168277,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
+#EXT-X-STREAM-INF:BANDWIDTH=1168319,CODECS="avc1.64001e,ac-3",RESOLUTION=640x360,AUDIO="default-audio-group"
 video.m3u8

@@ -19,7 +19,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="967-1034" timescale="44100">
@@ -2,57 +2,57 @@
 <!--Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>-->
 <MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.73607S">
   <Period id="0" duration="PT2.002S">
-    <AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
+    <AdaptationSet id="0" contentType="audio" subsegmentAlignment="true">
       <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="0" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
+      <Representation id="0" bandwidth="129651" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+        <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
+        <BaseURL>output_audio.mp4</BaseURL>
+        <SegmentBase indexRange="967-1046" timescale="44100">
+          <Initialization range="0-966"/>
+        </SegmentBase>
+      </Representation>
+    </AdaptationSet>
+    <AdaptationSet id="2" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
+      <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
+      <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
+        <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
+      </ContentProtection>
+      <Representation id="1" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
         <BaseURL>output_video.mp4</BaseURL>
         <SegmentBase indexRange="1091-1158" timescale="30000">
           <Initialization range="0-1090"/>
         </SegmentBase>
       </Representation>
     </AdaptationSet>
-    <AdaptationSet id="2" contentType="audio" subsegmentAlignment="true">
+  </Period>
+  <Period id="1" duration="PT0.734067S">
+    <AdaptationSet id="0" contentType="audio" subsegmentAlignment="true">
       <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
        <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="0" bandwidth="129651" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
-        <SegmentBase indexRange="967-1034" timescale="44100">
+        <SegmentBase indexRange="967-1046" timescale="44100" presentationTimeOffset="91230">
           <Initialization range="0-966"/>
         </SegmentBase>
       </Representation>
     </AdaptationSet>
-  </Period>
-  <Period id="1" duration="PT0.734067S">
-    <AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
+    <AdaptationSet id="2" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
       <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="0" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
+      <Representation id="1" bandwidth="885590" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
         <BaseURL>output_video.mp4</BaseURL>
         <SegmentBase indexRange="1091-1158" timescale="30000" presentationTimeOffset="62061">
           <Initialization range="0-1090"/>
         </SegmentBase>
       </Representation>
     </AdaptationSet>
-    <AdaptationSet id="2" contentType="audio" subsegmentAlignment="true">
-      <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
-      <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
-        <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
-      </ContentProtection>
-      <Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
-        <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
-        <BaseURL>output_audio.mp4</BaseURL>
-        <SegmentBase indexRange="967-1034" timescale="44100" presentationTimeOffset="91230">
-          <Initialization range="0-966"/>
-        </SegmentBase>
-      </Representation>
-    </AdaptationSet>
   </Period>
 </MPD>
@@ -19,7 +19,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="967-1034" timescale="44100">

@@ -19,7 +19,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="1" bandwidth="129035" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="129012" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="915-982" timescale="44100">

@@ -15,7 +15,7 @@
       </Representation>
     </AdaptationSet>
     <AdaptationSet id="1" contentType="audio" subsegmentAlignment="true">
-      <Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
         <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">

@@ -19,7 +19,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="1" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="967-1034" timescale="44100">

@@ -21,12 +21,13 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="1" bandwidth="124859" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="124634" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>
@@ -17,7 +17,7 @@
       </Representation>
     </AdaptationSet>
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
-      <Representation id="1" bandwidth="124859" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="124634" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
         <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">

@@ -25,8 +25,9 @@
         </ContentProtection>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>
@@ -17,12 +17,13 @@
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
       <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011"/>
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b"/>
-      <Representation id="1" bandwidth="125808" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="125598" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>

@@ -17,12 +17,13 @@
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
       <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011"/>
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b"/>
-      <Representation id="1" bandwidth="125337" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="125122" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>

@@ -15,14 +15,15 @@
       </Representation>
     </AdaptationSet>
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
-      <Representation id="1" bandwidth="125808" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="125598" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011"/>
         <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>
@@ -13,12 +13,13 @@
       </Representation>
     </AdaptationSet>
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
-      <Representation id="1" bandwidth="122544" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="122308" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>
@@ -12,27 +12,19 @@
       </Representation>
     </AdaptationSet>
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
-      <Representation id="1" bandwidth="131035" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="143117" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="2048"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>
     </AdaptationSet>
   </Period>
   <Period id="1" duration="PT0.734067S">
-    <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
-      <Representation id="1" bandwidth="108486" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
-        <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
-        <SegmentTemplate timescale="44100" presentationTimeOffset="91230" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="3">
-          <SegmentTimeline>
-            <S t="88064" d="33792"/>
-          </SegmentTimeline>
-        </SegmentTemplate>
-      </Representation>
-    </AdaptationSet>
     <AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" segmentAlignment="true" par="16:9">
       <Representation id="0" bandwidth="869044" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
         <SegmentTemplate timescale="30000" presentationTimeOffset="62061" initialization="output_video-init.mp4" media="output_video-$Number$.m4s" startNumber="3">

@@ -42,5 +34,15 @@
         </SegmentTemplate>
       </Representation>
     </AdaptationSet>
+    <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
+      <Representation id="1" bandwidth="105634" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+        <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
+        <SegmentTemplate timescale="44100" presentationTimeOffset="91230" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="4">
+          <SegmentTimeline>
+            <S t="91136" d="30720"/>
+          </SegmentTimeline>
+        </SegmentTemplate>
+      </Representation>
+    </AdaptationSet>
   </Period>
 </MPD>
@@ -13,12 +13,13 @@
       </Representation>
     </AdaptationSet>
     <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
-      <Representation id="1" bandwidth="122544" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="1" bandwidth="122308" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <SegmentTemplate timescale="44100" initialization="output_audio-init.mp4" media="output_audio-$Number$.m4s" startNumber="1">
           <SegmentTimeline>
-            <S t="0" d="44032" r="1"/>
-            <S t="88064" d="33792"/>
+            <S t="0" d="45056"/>
+            <S t="45056" d="44032"/>
+            <S t="89088" d="32768"/>
           </SegmentTimeline>
         </SegmentTemplate>
       </Representation>
packager/app/test/testdata/bear-640x360-av-mp4-master-cenc-ad_cues-golden.m3u8 (new file, 7 lines)
@@ -0,0 +1,7 @@
+#EXTM3U
+## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
+
+#EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
+
+#EXT-X-STREAM-INF:BANDWIDTH=1152419,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
+video.m3u8
@@ -3,5 +3,5 @@
 
 #EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
 
-#EXT-X-STREAM-INF:BANDWIDTH=1111340,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
+#EXT-X-STREAM-INF:BANDWIDTH=1111147,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
 video.m3u8

@@ -3,5 +3,5 @@
 
 #EXT-X-MEDIA:TYPE=AUDIO,URI="audio/audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
 
-#EXT-X-STREAM-INF:BANDWIDTH=1105163,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
+#EXT-X-STREAM-INF:BANDWIDTH=1105129,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group"
 video/video.m3u8
@@ -32,7 +32,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="2" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="2" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="967-1034" timescale="44100">

@@ -38,7 +38,7 @@
       <ContentProtection schemeIdUri="urn:uuid:1077efec-c0b2-4d02-ace3-3c1e52e2fb4b">
         <cenc:pssh>AAAANHBzc2gBAAAAEHfv7MCyTQKs4zweUuL7SwAAAAExMjM0NTY3ODkwMTIzNDU2AAAAAA==</cenc:pssh>
       </ContentProtection>
-      <Representation id="3" bandwidth="129185" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
+      <Representation id="3" bandwidth="129162" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
         <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
         <BaseURL>output_audio.mp4</BaseURL>
         <SegmentBase indexRange="967-1034" timescale="44100">
Binary file not shown.
@@ -8,10 +8,10 @@
 #EXTINF:1.010,
 #EXT-X-BYTERANGE:24460@977
 output_audio.mp4
-#EXTINF:0.975,
-#EXT-X-BYTERANGE:23899
+#EXTINF:1.010,
+#EXT-X-BYTERANGE:24747
 output_audio.mp4
-#EXTINF:0.766,
-#EXT-X-BYTERANGE:18811
+#EXTINF:0.731,
+#EXT-X-BYTERANGE:17963
 output_audio.mp4
 #EXT-X-ENDLIST

@@ -3,5 +3,5 @@
 
 #EXT-X-MEDIA:TYPE=AUDIO,URI="audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_0",AUTOSELECT=YES,CHANNELS="2"
 
-#EXT-X-STREAM-INF:BANDWIDTH=1174135,CODECS="avc1.64001e,ec-3",RESOLUTION=640x360,AUDIO="default-audio-group"
+#EXT-X-STREAM-INF:BANDWIDTH=1174212,CODECS="avc1.64001e,ec-3",RESOLUTION=640x360,AUDIO="default-audio-group"
 video.m3u8
5 binary files not shown.
@@ -49,7 +49,7 @@ bool MediaHandler::ValidateOutputStreamIndex(size_t stream_index) const {
   return stream_index < num_input_streams_;
 }
 
-Status MediaHandler::Dispatch(std::unique_ptr<StreamData> stream_data) {
+Status MediaHandler::Dispatch(std::unique_ptr<StreamData> stream_data) const {
   size_t output_stream_index = stream_data->stream_index;
   auto handler_it = output_handlers_.find(output_stream_index);
   if (handler_it == output_handlers_.end()) {
@@ -185,43 +185,54 @@ class MediaHandler {
 
   /// Dispatch the stream data to downstream handlers. Note that
   /// stream_data.stream_index should be the output stream index.
-  Status Dispatch(std::unique_ptr<StreamData> stream_data);
+  Status Dispatch(std::unique_ptr<StreamData> stream_data) const;
 
   /// Dispatch the stream info to downstream handlers.
   Status DispatchStreamInfo(
-      size_t stream_index, std::shared_ptr<const StreamInfo> stream_info) {
-    return Dispatch(StreamData::FromStreamInfo(stream_index, stream_info));
+      size_t stream_index,
+      std::shared_ptr<const StreamInfo> stream_info) const {
+    return Dispatch(
+        StreamData::FromStreamInfo(stream_index, std::move(stream_info)));
   }
 
   /// Dispatch the media sample to downstream handlers.
   Status DispatchMediaSample(
-      size_t stream_index, std::shared_ptr<const MediaSample> media_sample) {
-    return Dispatch(StreamData::FromMediaSample(stream_index, media_sample));
+      size_t stream_index,
+      std::shared_ptr<const MediaSample> media_sample) const {
+    return Dispatch(
+        StreamData::FromMediaSample(stream_index, std::move(media_sample)));
   }
 
   /// Dispatch the text sample to downsream handlers.
   // DispatchTextSample should only be override for testing.
   Status DispatchTextSample(
-      size_t stream_index, std::shared_ptr<const TextSample> text_sample) {
-    return Dispatch(StreamData::FromTextSample(stream_index, text_sample));
+      size_t stream_index,
+      std::shared_ptr<const TextSample> text_sample) const {
+    return Dispatch(
+        StreamData::FromTextSample(stream_index, std::move(text_sample)));
   }
 
   /// Dispatch the segment info to downstream handlers.
   Status DispatchSegmentInfo(
-      size_t stream_index, std::shared_ptr<const SegmentInfo> segment_info) {
-    return Dispatch(StreamData::FromSegmentInfo(stream_index, segment_info));
+      size_t stream_index,
+      std::shared_ptr<const SegmentInfo> segment_info) const {
+    return Dispatch(
+        StreamData::FromSegmentInfo(stream_index, std::move(segment_info)));
   }
 
   /// Dispatch the scte35 event to downstream handlers.
-  Status DispatchScte35Event(size_t stream_index,
-                             std::shared_ptr<const Scte35Event> scte35_event) {
-    return Dispatch(StreamData::FromScte35Event(stream_index, scte35_event));
+  Status DispatchScte35Event(
+      size_t stream_index,
+      std::shared_ptr<const Scte35Event> scte35_event) const {
+    return Dispatch(
+        StreamData::FromScte35Event(stream_index, std::move(scte35_event)));
   }
 
   /// Dispatch the cue event to downstream handlers.
   Status DispatchCueEvent(size_t stream_index,
-                          std::shared_ptr<const CueEvent> cue_event) {
-    return Dispatch(StreamData::FromCueEvent(stream_index, cue_event));
+                          std::shared_ptr<const CueEvent> cue_event) const {
+    return Dispatch(
+        StreamData::FromCueEvent(stream_index, std::move(cue_event)));
   }
 
   /// Flush the downstream connected at the specified output stream index.
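Every Dispatch helper above is now const-qualified, so a handler can emit data from its own const member functions. A minimal hypothetical fragment (the class and method names are invented for illustration):

// Hypothetical, for illustration only.
class SomeHandler : public MediaHandler {
 private:
  // Valid now that DispatchCueEvent() and Dispatch() are const.
  Status ForwardCue(std::shared_ptr<const CueEvent> cue_event) const {
    return DispatchCueEvent(/*stream_index=*/0, std::move(cue_event));
  }
};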
@@ -9,46 +9,38 @@
 #include <algorithm>
 
 #include "packager/base/logging.h"
-#include "packager/base/threading/platform_thread.h"
 #include "packager/media/base/media_sample.h"
 
 namespace shaka {
 namespace media {
 namespace {
-int64_t kThreadIdUnset = -1;
+const size_t kStreamIndex = 0;
 }  // namespace
 
 ChunkingHandler::ChunkingHandler(const ChunkingParams& chunking_params)
-    : chunking_params_(chunking_params),
-      thread_id_(kThreadIdUnset),
-      media_sample_comparator_(this),
-      cached_media_sample_stream_data_(media_sample_comparator_) {
+    : chunking_params_(chunking_params) {
   CHECK_NE(chunking_params.segment_duration_in_seconds, 0u);
 }
 
-ChunkingHandler::~ChunkingHandler() {}
-
 Status ChunkingHandler::InitializeInternal() {
-  segment_info_.resize(num_input_streams());
-  subsegment_info_.resize(num_input_streams());
-  time_scales_.resize(num_input_streams());
-  last_sample_end_timestamps_.resize(num_input_streams());
-  num_cached_samples_.resize(num_input_streams());
+  if (num_input_streams() != 1 || next_output_stream_index() != 1) {
+    return Status(error::INVALID_ARGUMENT,
+                  "Expects exactly one input and one output.");
+  }
   return Status::OK;
 }
 
 Status ChunkingHandler::Process(std::unique_ptr<StreamData> stream_data) {
   switch (stream_data->stream_data_type) {
     case StreamDataType::kStreamInfo:
-      return OnStreamInfo(stream_data->stream_index, stream_data->stream_info);
-    case StreamDataType::kScte35Event:
-      return OnScte35Event(stream_data->stream_index,
-                           stream_data->scte35_event);
+      return OnStreamInfo(std::move(stream_data->stream_info));
+    case StreamDataType::kCueEvent:
+      return OnCueEvent(std::move(stream_data->cue_event));
     case StreamDataType::kSegmentInfo:
      VLOG(3) << "Droppping existing segment info.";
      return Status::OK;
     case StreamDataType::kMediaSample:
-      return OnMediaSample(std::move(stream_data));
+      return OnMediaSample(std::move(stream_data->media_sample));
     default:
      VLOG(3) << "Stream data type "
              << static_cast<int>(stream_data->stream_data_type) << " ignored.";
@ -57,289 +49,103 @@ Status ChunkingHandler::Process(std::unique_ptr<StreamData> stream_data) {
|
||||||
}
|
}
|
||||||
|
|
||||||
Status ChunkingHandler::OnFlushRequest(size_t input_stream_index) {
|
Status ChunkingHandler::OnFlushRequest(size_t input_stream_index) {
|
||||||
// Process all cached samples.
|
Status status = EndSegmentIfStarted();
|
||||||
while (!cached_media_sample_stream_data_.empty()) {
|
|
||||||
Status status =
|
|
||||||
ProcessMediaSampleStreamData(*cached_media_sample_stream_data_.top());
|
|
||||||
if (!status.ok())
|
if (!status.ok())
|
||||||
return status;
|
return status;
|
||||||
--num_cached_samples_[cached_media_sample_stream_data_.top()->stream_index];
|
return FlushDownstream(kStreamIndex);
|
||||||
cached_media_sample_stream_data_.pop();
|
|
||||||
}
|
|
||||||
if (segment_info_[input_stream_index]) {
|
|
||||||
auto& segment_info = segment_info_[input_stream_index];
|
|
||||||
if (segment_info->start_timestamp != -1) {
|
|
||||||
segment_info->duration = last_sample_end_timestamps_[input_stream_index] -
|
|
||||||
segment_info->start_timestamp;
|
|
||||||
Status status =
|
|
||||||
DispatchSegmentInfo(input_stream_index, std::move(segment_info));
|
|
||||||
if (!status.ok())
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const size_t output_stream_index = input_stream_index;
|
|
||||||
return FlushDownstream(output_stream_index);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Status ChunkingHandler::OnStreamInfo(uint64_t stream_index,
|
Status ChunkingHandler::OnStreamInfo(std::shared_ptr<const StreamInfo> info) {
|
||||||
std::shared_ptr<const StreamInfo> info) {
|
time_scale_ = info->time_scale();
|
||||||
// Make sure the inputs come from the same thread.
|
|
||||||
const int64_t thread_id =
|
|
||||||
static_cast<int64_t>(base::PlatformThread::CurrentId());
|
|
||||||
|
|
||||||
int64_t expected = kThreadIdUnset;
|
|
||||||
if (!thread_id_.compare_exchange_strong(expected, thread_id) &&
|
|
||||||
expected != thread_id) {
|
|
||||||
return Status(error::CHUNKING_ERROR,
|
|
||||||
"Inputs should come from the same thread.");
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto time_scale = info->time_scale();
|
|
||||||
time_scales_[stream_index] = time_scale;
|
|
||||||
|
|
||||||
// The video stream is treated as the main stream. If there is only one
|
|
||||||
// stream, it is the main stream.
|
|
||||||
const bool is_main_stream =
|
|
||||||
main_stream_index_ == kInvalidStreamIndex &&
|
|
||||||
(info->stream_type() == kStreamVideo || num_input_streams() == 1);
|
|
||||||
if (is_main_stream) {
|
|
||||||
main_stream_index_ = stream_index;
|
|
||||||
segment_duration_ =
|
segment_duration_ =
|
||||||
chunking_params_.segment_duration_in_seconds * time_scale;
|
chunking_params_.segment_duration_in_seconds * time_scale_;
|
||||||
subsegment_duration_ =
|
subsegment_duration_ =
|
||||||
chunking_params_.subsegment_duration_in_seconds * time_scale;
|
chunking_params_.subsegment_duration_in_seconds * time_scale_;
|
||||||
} else if (info->stream_type() == kStreamVideo) {
|
return DispatchStreamInfo(kStreamIndex, std::move(info));
|
||||||
return Status(error::CHUNKING_ERROR,
|
|
||||||
"Only one video stream is allowed per chunking handler.");
|
|
||||||
}
|
|
||||||
|
|
||||||
return DispatchStreamInfo(stream_index, std::move(info));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Status ChunkingHandler::OnScte35Event(
|
Status ChunkingHandler::OnCueEvent(std::shared_ptr<const CueEvent> event) {
|
||||||
uint64_t stream_index,
|
Status status = EndSegmentIfStarted();
|
||||||
std::shared_ptr<const Scte35Event> event) {
|
|
||||||
if (stream_index == main_stream_index_) {
|
|
||||||
scte35_events_.push(std::move(event));
|
|
||||||
} else {
|
|
||||||
VLOG(3) << "Dropping scte35 event from non main stream.";
|
|
||||||
}
|
|
||||||
|
|
||||||
return Status::OK;
|
|
||||||
}
|
|
||||||
|
|
||||||
Status ChunkingHandler::OnMediaSample(std::unique_ptr<StreamData> stream_data) {
|
|
||||||
DCHECK_EQ(StreamDataType::kMediaSample, stream_data->stream_data_type);
|
|
||||||
|
|
||||||
const size_t stream_index = stream_data->stream_index;
|
|
||||||
DCHECK_NE(time_scales_[stream_index], 0u)
|
|
||||||
<< "kStreamInfo should arrive before kMediaSample";
|
|
||||||
|
|
||||||
if (stream_index != main_stream_index_ &&
|
|
||||||
!stream_data->media_sample->is_key_frame()) {
|
|
||||||
return Status(error::CHUNKING_ERROR,
|
|
||||||
"All non video samples should be key frames.");
|
|
||||||
}
|
|
||||||
// The streams are expected to be roughly synchronized, so we don't expect
|
|
||||||
// to see a lot of samples from one stream but no samples from another
|
|
||||||
// stream.
|
|
||||||
// The value is kind of arbitrary here. For a 24fps video, it is ~40s.
|
|
||||||
const size_t kMaxCachedSamplesPerStream = 1000u;
|
|
||||||
if (num_cached_samples_[stream_index] >= kMaxCachedSamplesPerStream) {
|
|
||||||
LOG(ERROR) << "Streams are not synchronized:";
|
|
||||||
for (size_t i = 0; i < num_cached_samples_.size(); ++i)
|
|
||||||
LOG(ERROR) << " [Stream " << i << "] " << num_cached_samples_[i];
|
|
||||||
return Status(error::CHUNKING_ERROR, "Streams are not synchronized.");
|
|
||||||
}
|
|
||||||
|
|
||||||
cached_media_sample_stream_data_.push(std::move(stream_data));
|
|
||||||
++num_cached_samples_[stream_index];
|
|
||||||
|
|
||||||
// If we have cached samples from every stream, the first sample in
|
|
||||||
// |cached_media_samples_stream_data_| is guaranteed to be the earliest
|
|
||||||
// sample. Extract and process that sample.
|
|
||||||
if (std::all_of(num_cached_samples_.begin(), num_cached_samples_.end(),
|
|
||||||
[](size_t num_samples) { return num_samples > 0; })) {
|
|
||||||
while (true) {
|
|
||||||
const size_t top_stream_index =
|
|
||||||
cached_media_sample_stream_data_.top()->stream_index;
|
|
||||||
Status status =
|
|
||||||
ProcessMediaSampleStreamData(*cached_media_sample_stream_data_.top());
|
|
||||||
if (!status.ok())
|
if (!status.ok())
|
||||||
return status;
|
return status;
|
||||||
cached_media_sample_stream_data_.pop();
|
// Force start new segment after cue event.
|
||||||
if (--num_cached_samples_[top_stream_index] == 0)
|
segment_start_time_ = base::nullopt;
|
||||||
break;
|
return DispatchCueEvent(kStreamIndex, std::move(event));
|
||||||
}
|
|
||||||
}
|
|
||||||
return Status::OK;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Status ChunkingHandler::ProcessMainMediaSample(const MediaSample* sample) {
|
Status ChunkingHandler::OnMediaSample(
|
||||||
const bool is_key_frame = sample->is_key_frame();
|
std::shared_ptr<const MediaSample> sample) {
|
||||||
const int64_t timestamp = sample->dts();
|
DCHECK_NE(time_scale_, 0u) << "kStreamInfo should arrive before kMediaSample";
|
||||||
const int64_t time_scale = time_scales_[main_stream_index_];
|
|
||||||
const double dts_in_seconds = static_cast<double>(sample->dts()) / time_scale;
|
|
||||||
|
|
||||||
// Check if we need to terminate the current (sub)segment.
|
const int64_t timestamp = sample->dts();
|
||||||
bool new_segment = false;
|
|
||||||
bool new_subsegment = false;
|
bool started_new_segment = false;
|
||||||
std::shared_ptr<CueEvent> cue_event;
|
const bool can_start_new_segment =
|
||||||
if (is_key_frame || !chunking_params_.segment_sap_aligned) {
|
sample->is_key_frame() || !chunking_params_.segment_sap_aligned;
|
||||||
|
if (can_start_new_segment) {
|
||||||
const int64_t segment_index = timestamp / segment_duration_;
|
const int64_t segment_index = timestamp / segment_duration_;
|
||||||
if (segment_index != current_segment_index_) {
|
if (!segment_start_time_ || segment_index != current_segment_index_) {
|
||||||
current_segment_index_ = segment_index;
|
current_segment_index_ = segment_index;
|
||||||
// Reset subsegment index.
|
// Reset subsegment index.
|
||||||
current_subsegment_index_ = 0;
|
current_subsegment_index_ = 0;
|
||||||
new_segment = true;
|
|
||||||
}
|
|
||||||
// We use 'while' instead of 'if' to make sure to pop off multiple SCTE35
|
|
||||||
// events that may be very close to each other.
|
|
||||||
while (!scte35_events_.empty() &&
|
|
||||||
scte35_events_.top()->start_time_in_seconds <= dts_in_seconds) {
|
|
||||||
// For simplicity, don't change |current_segment_index_|.
|
|
||||||
current_subsegment_index_ = 0;
|
|
||||||
new_segment = true;
|
|
||||||
|
|
||||||
cue_event = std::make_shared<CueEvent>();
|
Status status = EndSegmentIfStarted();
|
||||||
cue_event->time_in_seconds =
|
|
||||||
static_cast<double>(sample->pts()) / time_scale;
|
|
||||||
cue_event->cue_data = scte35_events_.top()->cue_data;
|
|
||||||
LOG(INFO) << "Chunked at " << dts_in_seconds << " seconds for Ad Cue.";
|
|
||||||
|
|
||||||
scte35_events_.pop();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (!new_segment && subsegment_duration_ > 0 &&
|
|
||||||
(is_key_frame || !chunking_params_.subsegment_sap_aligned)) {
|
|
||||||
const int64_t subsegment_index =
|
|
||||||
(timestamp - segment_info_[main_stream_index_]->start_timestamp) /
|
|
||||||
subsegment_duration_;
|
|
||||||
if (subsegment_index != current_subsegment_index_) {
|
|
||||||
current_subsegment_index_ = subsegment_index;
|
|
||||||
new_subsegment = true;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Status status;
|
|
||||||
if (new_segment) {
|
|
||||||
status.Update(DispatchSegmentInfoForAllStreams());
|
|
||||||
segment_info_[main_stream_index_]->start_timestamp = timestamp;
|
|
||||||
|
|
||||||
if (cue_event)
|
|
||||||
status.Update(DispatchCueEventForAllStreams(std::move(cue_event)));
|
|
||||||
}
|
|
||||||
if (subsegment_duration_ > 0 && (new_segment || new_subsegment)) {
|
|
||||||
status.Update(DispatchSubsegmentInfoForAllStreams());
|
|
||||||
subsegment_info_[main_stream_index_]->start_timestamp = timestamp;
|
|
||||||
}
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
Status ChunkingHandler::ProcessMediaSampleStreamData(
|
|
||||||
const StreamData& media_sample_stream_data) {
|
|
||||||
const size_t stream_index = media_sample_stream_data.stream_index;
|
|
||||||
const auto sample = std::move(media_sample_stream_data.media_sample);
|
|
||||||
|
|
||||||
if (stream_index == main_stream_index_) {
|
|
||||||
Status status = ProcessMainMediaSample(sample.get());
|
|
||||||
if (!status.ok())
|
if (!status.ok())
|
||||||
return status;
|
return status;
|
||||||
|
segment_start_time_ = timestamp;
|
||||||
|
subsegment_start_time_ = timestamp;
|
||||||
|
started_new_segment = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (!started_new_segment && IsSubsegmentEnabled()) {
|
||||||
|
const bool can_start_new_subsegment =
|
||||||
|
sample->is_key_frame() || !chunking_params_.subsegment_sap_aligned;
|
||||||
|
if (can_start_new_subsegment) {
|
||||||
|
const int64_t subsegment_index =
|
||||||
|
(timestamp - segment_start_time_.value()) / subsegment_duration_;
|
||||||
|
if (subsegment_index != current_subsegment_index_) {
|
||||||
|
current_subsegment_index_ = subsegment_index;
|
||||||
|
|
||||||
|
Status status = EndSubsegmentIfStarted();
|
||||||
|
if (!status.ok())
|
||||||
|
return status;
|
||||||
|
subsegment_start_time_ = timestamp;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
VLOG(3) << "Stream index: " << stream_index << " "
|
VLOG(3) << "Sample ts: " << timestamp << " "
|
||||||
<< "Sample ts: " << sample->dts() << " "
|
<< " duration: " << sample->duration() << " scale: " << time_scale_
|
||||||
<< " duration: " << sample->duration()
|
<< (segment_start_time_ ? " dispatch " : " discard ");
|
||||||
<< " scale: " << time_scales_[stream_index] << "\n"
|
|
||||||
<< " scale: " << time_scales_[main_stream_index_]
|
|
||||||
<< (segment_info_[stream_index] ? " dispatch " : " discard ");
|
|
||||||
// Discard samples before segment start. If the segment has started,
|
// Discard samples before segment start. If the segment has started,
|
||||||
// |segment_info_[stream_index]| won't be null.
|
// |segment_start_time_| won't be null.
|
||||||
if (!segment_info_[stream_index])
|
if (!segment_start_time_)
|
||||||
return Status::OK;
|
return Status::OK;
|
||||||
if (segment_info_[stream_index]->start_timestamp == -1)
|
last_sample_end_timestamp_ = timestamp + sample->duration();
|
||||||
segment_info_[stream_index]->start_timestamp = sample->dts();
|
return DispatchMediaSample(kStreamIndex, std::move(sample));
|
||||||
if (subsegment_info_[stream_index] &&
|
|
||||||
subsegment_info_[stream_index]->start_timestamp == -1) {
|
|
||||||
subsegment_info_[stream_index]->start_timestamp = sample->dts();
|
|
||||||
}
|
|
||||||
last_sample_end_timestamps_[stream_index] =
|
|
||||||
sample->dts() + sample->duration();
|
|
||||||
return DispatchMediaSample(stream_index, std::move(sample));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Status ChunkingHandler::DispatchSegmentInfoForAllStreams() {
|
Status ChunkingHandler::EndSegmentIfStarted() const {
|
||||||
Status status;
|
if (!segment_start_time_)
|
||||||
for (size_t i = 0; i < segment_info_.size() && status.ok(); ++i) {
|
return Status::OK;
|
||||||
if (segment_info_[i] && segment_info_[i]->start_timestamp != -1) {
|
|
||||||
segment_info_[i]->duration =
|
auto segment_info = std::make_shared<SegmentInfo>();
|
||||||
last_sample_end_timestamps_[i] - segment_info_[i]->start_timestamp;
|
segment_info->start_timestamp = segment_start_time_.value();
|
||||||
status.Update(DispatchSegmentInfo(i, std::move(segment_info_[i])));
|
segment_info->duration =
|
||||||
}
|
last_sample_end_timestamp_ - segment_start_time_.value();
|
||||||
segment_info_[i].reset(new SegmentInfo);
|
return DispatchSegmentInfo(kStreamIndex, std::move(segment_info));
|
||||||
subsegment_info_[i].reset();
|
|
||||||
}
|
|
||||||
return status;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
Status ChunkingHandler::DispatchSubsegmentInfoForAllStreams() {
|
Status ChunkingHandler::EndSubsegmentIfStarted() const {
|
||||||
Status status;
|
if (!subsegment_start_time_)
|
||||||
for (size_t i = 0; i < subsegment_info_.size() && status.ok(); ++i) {
|
return Status::OK;
|
||||||
if (subsegment_info_[i] && subsegment_info_[i]->start_timestamp != -1) {
|
|
||||||
subsegment_info_[i]->duration =
|
|
||||||
last_sample_end_timestamps_[i] - subsegment_info_[i]->start_timestamp;
|
|
||||||
status.Update(DispatchSegmentInfo(i, std::move(subsegment_info_[i])));
|
|
||||||
}
|
|
||||||
subsegment_info_[i].reset(new SegmentInfo);
|
|
||||||
subsegment_info_[i]->is_subsegment = true;
|
|
||||||
}
|
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
Status ChunkingHandler::DispatchCueEventForAllStreams(
|
auto subsegment_info = std::make_shared<SegmentInfo>();
|
||||||
std::shared_ptr<CueEvent> cue_event) {
|
subsegment_info->start_timestamp = subsegment_start_time_.value();
|
||||||
Status status;
|
subsegment_info->duration =
|
||||||
for (size_t i = 0; i < segment_info_.size() && status.ok(); ++i) {
|
last_sample_end_timestamp_ - subsegment_start_time_.value();
|
||||||
status.Update(DispatchCueEvent(i, cue_event));
|
subsegment_info->is_subsegment = true;
|
||||||
}
|
return DispatchSegmentInfo(kStreamIndex, std::move(subsegment_info));
|
||||||
return status;
|
|
||||||
}
|
|
||||||
|
|
||||||
ChunkingHandler::MediaSampleTimestampGreater::MediaSampleTimestampGreater(
|
|
||||||
const ChunkingHandler* const chunking_handler)
|
|
||||||
: chunking_handler_(chunking_handler) {}
|
|
||||||
|
|
||||||
bool ChunkingHandler::MediaSampleTimestampGreater::operator()(
|
|
||||||
const std::unique_ptr<StreamData>& lhs,
|
|
||||||
const std::unique_ptr<StreamData>& rhs) const {
|
|
||||||
DCHECK(lhs);
|
|
||||||
DCHECK(rhs);
|
|
||||||
return GetSampleTimeInSeconds(*lhs) > GetSampleTimeInSeconds(*rhs);
|
|
||||||
}
|
|
||||||
|
|
||||||
double ChunkingHandler::MediaSampleTimestampGreater::GetSampleTimeInSeconds(
|
|
||||||
const StreamData& media_sample_stream_data) const {
|
|
||||||
const size_t stream_index = media_sample_stream_data.stream_index;
|
|
||||||
const auto& sample = media_sample_stream_data.media_sample;
|
|
||||||
DCHECK(sample);
|
|
||||||
// Order main samples by left boundary and non main samples by mid-point. This
|
|
||||||
// ensures non main samples are properly chunked, i.e. if the portion of the
|
|
||||||
// sample in the next chunk is bigger than the portion of the sample in the
|
|
||||||
// previous chunk, the sample is placed in the next chunk.
|
|
||||||
const uint64_t timestamp =
|
|
||||||
stream_index == chunking_handler_->main_stream_index_
|
|
||||||
? sample->dts()
|
|
||||||
: (sample->dts() + sample->duration() / 2);
|
|
||||||
return static_cast<double>(timestamp) /
|
|
||||||
chunking_handler_->time_scales_[stream_index];
|
|
||||||
}
|
|
||||||
|
|
||||||
bool ChunkingHandler::Scte35EventTimestampGreater::operator()(
|
|
||||||
const std::shared_ptr<const Scte35Event>& lhs,
|
|
||||||
const std::shared_ptr<const Scte35Event>& rhs) const {
|
|
||||||
DCHECK(lhs);
|
|
||||||
DCHECK(rhs);
|
|
||||||
return lhs->start_time_in_seconds > rhs->start_time_in_seconds;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace media
|
} // namespace media
|
||||||
|
|
|
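The boundary logic in the rewritten handler above reduces to integer division of a timestamp by the segment duration, with segment_start_time_ cleared after a cue so that the next key frame opens a new segment. A standalone sketch of that arithmetic (hypothetical names; std::optional stands in for base::Optional):

// Sketch: which segment does a timestamp fall into, and does it start a new one?
#include <cstdint>
#include <optional>

struct BoundaryDecision {
  int64_t segment_index;
  bool starts_new_segment;
};

BoundaryDecision DecideBoundary(int64_t timestamp,
                                int64_t segment_duration,
                                std::optional<int64_t> segment_start_time,
                                int64_t current_segment_index) {
  BoundaryDecision d;
  d.segment_index = timestamp / segment_duration;
  // A segment starts if none is open (e.g. right after a cue event) or if the
  // sample crossed into a new fixed-duration window.
  d.starts_new_segment =
      !segment_start_time || d.segment_index != current_segment_index;
  return d;
}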
@ -11,6 +11,7 @@
#include <queue>

#include "packager/base/logging.h"
#include "packager/base/optional.h"
#include "packager/media/base/media_handler.h"
#include "packager/media/public/chunking_params.h"

@ -19,9 +20,7 @@ namespace media {

/// ChunkingHandler splits the samples into segments / subsegments based on the
/// specified chunking params.
/// This handler is a multi-in multi-out handler. If more than one input is
/// This handler is a one-in one-out handler.
/// provided, there should be one and only one video stream; also, all inputs
/// should come from the same thread and are synchronized.
/// There can be multiple chunking handler running in different threads or even
/// different processes, we use the "consistent chunking algorithm" to make sure
/// the chunks in different streams are aligned without explicit communcating
@ -36,17 +35,11 @@ namespace media {
/// 2. Chunk only at the consistent chunkable boundary
///
/// This algorithm will make sure the chunks from different video streams are
/// aligned if they have aligned GoPs. However, this algorithm will only work
/// aligned if they have aligned GoPs.
/// for video streams. To be able to chunk non video streams at similar
/// positions as video streams, ChunkingHandler is designed to accept one video
/// input and multiple non video inputs, the non video inputs are chunked when
/// the video input is chunked. If the inputs are synchronized - which is true
/// if the inputs come from the same demuxer, the video and non video chunks
/// are aligned.
class ChunkingHandler : public MediaHandler {
public:
explicit ChunkingHandler(const ChunkingParams& chunking_params);
~ChunkingHandler() override;
~ChunkingHandler() override = default;

protected:
/// @name MediaHandler implementation overrides.
@ -62,84 +55,34 @@ class ChunkingHandler : public MediaHandler {
ChunkingHandler(const ChunkingHandler&) = delete;
ChunkingHandler& operator=(const ChunkingHandler&) = delete;

Status OnStreamInfo(uint64_t stream_index,
Status OnStreamInfo(std::shared_ptr<const StreamInfo> info);
std::shared_ptr<const StreamInfo> info);
Status OnCueEvent(std::shared_ptr<const CueEvent> event);
Status OnScte35Event(uint64_t stream_index,
Status OnMediaSample(std::shared_ptr<const MediaSample> sample);
std::shared_ptr<const Scte35Event> event);

Status OnMediaSample(std::unique_ptr<StreamData> stream_data);
Status EndSegmentIfStarted() const;
Status EndSubsegmentIfStarted() const;

// Processes main media sample and apply chunking if needed.
bool IsSubsegmentEnabled() {
Status ProcessMainMediaSample(const MediaSample* sample);
return subsegment_duration_ > 0 &&
subsegment_duration_ != segment_duration_;
// Processes and dispatches media sample.
}
Status ProcessMediaSampleStreamData(const StreamData& media_data);

// The (sub)segments are aligned and dispatched together.
Status DispatchSegmentInfoForAllStreams();
Status DispatchSubsegmentInfoForAllStreams();
Status DispatchCueEventForAllStreams(std::shared_ptr<CueEvent> cue_event);

const ChunkingParams chunking_params_;

// The inputs are expected to come from the same thread.
// Segment and subsegment duration in stream's time scale.
std::atomic<int64_t> thread_id_;

// The video stream is the main stream; if there is only one stream, it is the
// main stream. The chunking is based on the main stream.
const size_t kInvalidStreamIndex = static_cast<size_t>(-1);
size_t main_stream_index_ = kInvalidStreamIndex;
// Segment and subsegment duration in main stream's time scale.
int64_t segment_duration_ = 0;
int64_t subsegment_duration_ = 0;

class MediaSampleTimestampGreater {
public:
explicit MediaSampleTimestampGreater(
const ChunkingHandler* const chunking_handler);

// Comparison operator. Used by |media_samples_| priority queue below to
// sort the media samples.
bool operator()(const std::unique_ptr<StreamData>& lhs,
const std::unique_ptr<StreamData>& rhs) const;

private:
double GetSampleTimeInSeconds(
const StreamData& media_sample_stream_data) const;

const ChunkingHandler* const chunking_handler_ = nullptr;
};
MediaSampleTimestampGreater media_sample_comparator_;
// Caches media samples and sort the samples.
std::priority_queue<std::unique_ptr<StreamData>,
std::vector<std::unique_ptr<StreamData>>,
MediaSampleTimestampGreater>
cached_media_sample_stream_data_;
// Tracks number of cached samples in input streams.
std::vector<size_t> num_cached_samples_;

// Current segment index, useful to determine where to do chunking.
int64_t current_segment_index_ = -1;
// Current subsegment index, useful to determine where to do chunking.
int64_t current_subsegment_index_ = -1;

std::vector<std::shared_ptr<SegmentInfo>> segment_info_;
base::Optional<int64_t> segment_start_time_;
std::vector<std::shared_ptr<SegmentInfo>> subsegment_info_;
base::Optional<int64_t> subsegment_start_time_;
std::vector<uint32_t> time_scales_;
uint32_t time_scale_ = 0;
// The end timestamp of the last dispatched sample.
std::vector<int64_t> last_sample_end_timestamps_;
int64_t last_sample_end_timestamp_ = 0;

struct Scte35EventTimestampGreater {
bool operator()(const std::shared_ptr<const Scte35Event>& lhs,
const std::shared_ptr<const Scte35Event>& rhs) const;
};
// Captures all incoming SCTE35 events to identify chunking points. Events
// will be removed from this queue one at a time as soon as the correct
// chunking point is identified in the incoming samples.
std::priority_queue<std::shared_ptr<const Scte35Event>,
std::vector<std::shared_ptr<const Scte35Event>>,
Scte35EventTimestampGreater>
scte35_events_;
};

} // namespace media
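Typical wiring of the one-in one-out handler mirrors the packager.cc changes further down in this commit; a brief sketch, with the example segment duration and the |muxer| handler assumed:

// Sketch: construct a ChunkingHandler and connect a downstream handler.
ChunkingParams chunking_params;
chunking_params.segment_duration_in_seconds = 6;  // assumed example value
auto chunker = std::make_shared<ChunkingHandler>(chunking_params);
// One input, one output: samples go in, samples plus SegmentInfo come out.
Status status = chunker->AddHandler(muxer);  // |muxer| created elsewhere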
@ -18,12 +18,10 @@ using ::testing::IsEmpty;
namespace shaka {
namespace media {
namespace {
const size_t kStreamIndex0 = 0;
const size_t kStreamIndex = 0;
const size_t kStreamIndex1 = 1;
const uint32_t kTimeScale0 = 800;
const uint32_t kTimeScale1 = 1000;
const int64_t kDuration0 = 200;
const int64_t kDuration = 300;
const int64_t kDuration1 = 300;
const bool kKeyFrame = true;
const bool kIsSubsegment = true;
const bool kEncrypted = true;
@ -57,34 +55,34 @@ TEST_F(ChunkingHandlerTest, AudioNoSubsegmentsThenFlush) {
SetUpChunkingHandler(1, chunking_params);

ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
kStreamIndex, GetAudioStreamInfo(kTimeScale0))));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted)));
ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale0, !kEncrypted)));

for (int i = 0; i < 5; ++i) {
ClearOutputStreamDataVector();
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
kStreamIndex, GetMediaSample(i * kDuration, kDuration, kKeyFrame))));
// One output stream_data except when i == 3, which also has SegmentInfo.
if (i == 3) {
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 3,
ElementsAre(IsSegmentInfo(kStreamIndex, 0, kDuration * 3,
!kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex0, i * kDuration1,
IsMediaSample(kStreamIndex, i * kDuration,
kDuration1, !kEncrypted)));
kDuration, !kEncrypted)));
} else {
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsMediaSample(kStreamIndex0, i * kDuration1,
ElementsAre(IsMediaSample(kStreamIndex, i * kDuration,
kDuration1, !kEncrypted)));
kDuration, !kEncrypted)));
}
}

ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex0));
ASSERT_OK(OnFlushRequest(kStreamIndex));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(IsSegmentInfo(kStreamIndex0, kDuration1 * 3, kDuration1 * 2,
ElementsAre(IsSegmentInfo(kStreamIndex, kDuration * 3, kDuration * 2,
!kIsSubsegment, !kEncrypted)));
}

@ -95,25 +93,24 @@ TEST_F(ChunkingHandlerTest, AudioWithSubsegments) {
SetUpChunkingHandler(1, chunking_params);

ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
kStreamIndex, GetAudioStreamInfo(kTimeScale0))));
for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
kStreamIndex, GetMediaSample(i * kDuration, kDuration, kKeyFrame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted),
IsStreamInfo(kStreamIndex, kTimeScale0, !kEncrypted),
IsMediaSample(kStreamIndex0, 0, kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex, 0, kDuration, !kEncrypted),
IsMediaSample(kStreamIndex0, kDuration1, kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex, kDuration, kDuration, !kEncrypted),
IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 2, kIsSubsegment,
IsSegmentInfo(kStreamIndex, 0, kDuration * 2, kIsSubsegment,
!kEncrypted),
IsMediaSample(kStreamIndex0, 2 * kDuration1, kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex, 2 * kDuration, kDuration, !kEncrypted),
IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 3, !kIsSubsegment,
IsSegmentInfo(kStreamIndex, 0, kDuration * 3, !kIsSubsegment,
!kEncrypted),
IsMediaSample(kStreamIndex0, 3 * kDuration1, kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex, 3 * kDuration, kDuration, !kEncrypted),
IsMediaSample(kStreamIndex0, 4 * kDuration1, kDuration1,
IsMediaSample(kStreamIndex, 4 * kDuration, kDuration, !kEncrypted)));
!kEncrypted)));
}

TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
@ -123,193 +120,79 @@ TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
SetUpChunkingHandler(1, chunking_params);

ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale1))));
kStreamIndex, GetVideoStreamInfo(kTimeScale1))));
const int64_t kVideoStartTimestamp = 12345;
for (int i = 0; i < 6; ++i) {
// Alternate key frame.
const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
kStreamIndex, GetMediaSample(kVideoStartTimestamp + i * kDuration,
kDuration1, is_key_frame))));
kDuration, is_key_frame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsStreamInfo(kStreamIndex0, kTimeScale1, !kEncrypted),
IsStreamInfo(kStreamIndex, kTimeScale1, !kEncrypted),
// The first samples @ kStartTimestamp is discarded - not key frame.
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration,
kDuration1, !kEncrypted),
kDuration, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 2,
kDuration1, !kEncrypted),
kDuration, !kEncrypted),
// The next segment boundary 13245 / 1000 != 12645 / 1000.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp + kDuration1,
IsSegmentInfo(kStreamIndex, kVideoStartTimestamp + kDuration,
kDuration1 * 2, !kIsSubsegment, !kEncrypted),
kDuration * 2, !kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 3,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 3,
kDuration1, !kEncrypted),
kDuration, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 4,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 4,
kDuration1, !kEncrypted),
kDuration, !kEncrypted),
// The subsegment has duration kDuration1 * 2 since it can only
// The subsegment has duration kDuration * 2 since it can only
// terminate before key frame.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 3,
IsSegmentInfo(kStreamIndex, kVideoStartTimestamp + kDuration * 3,
kDuration1 * 2, kIsSubsegment, !kEncrypted),
kDuration * 2, kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 5,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 5,
kDuration1, !kEncrypted)));
kDuration, !kEncrypted)));
}

TEST_F(ChunkingHandlerTest, AudioAndVideo) {
TEST_F(ChunkingHandlerTest, CueEvent) {
ChunkingParams chunking_params;
chunking_params.segment_duration_in_seconds = 1;
chunking_params.subsegment_duration_in_seconds = 0.3;
SetUpChunkingHandler(2, chunking_params);

ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex1, GetVideoStreamInfo(kTimeScale1))));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted),
IsStreamInfo(kStreamIndex1, kTimeScale1, !kEncrypted)));
ClearOutputStreamDataVector();

// Equivalent to 12345 in video timescale.
const int64_t kAudioStartTimestamp = 9876;
const int64_t kVideoStartTimestamp = 12345;
// Burst of audio and video samples. They will be properly ordered.
for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(kAudioStartTimestamp + kDuration0 * i,
kDuration0, true))));
}
for (int i = 0; i < 5; ++i) {
// Alternate key frame.
const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex1, GetMediaSample(kVideoStartTimestamp + kDuration1 * i,
kDuration1, is_key_frame))));
}

EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
// The first samples @ kStartTimestamp is discarded - not key frame.
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0,
kDuration0, !kEncrypted),
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 2,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 2,
kDuration0, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 3,
kDuration0, !kEncrypted),
// The audio segment is terminated together with video stream.
IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0,
kDuration0 * 3, !kIsSubsegment, !kEncrypted),
// The next segment boundary 13245 / 1000 != 12645 / 1000.
IsSegmentInfo(kStreamIndex1, kVideoStartTimestamp + kDuration1,
kDuration1 * 2, !kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
kDuration0, !kEncrypted)));
ClearOutputStreamDataVector();

// The side comments below show the equivalent timestamp in video timescale.
// The audio and video are made ~aligned.
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(kAudioStartTimestamp + kDuration0 * 5, kDuration0,
true)))); // 13595
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex1,
GetMediaSample(kVideoStartTimestamp + kDuration1 * 5, kDuration1,
true)))); // 13845
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(kAudioStartTimestamp + kDuration0 * 6, kDuration0,
true)))); // 13845
// This expectation are separated from the expectation above because
// ElementsAre supports at most 10 elements.
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 4,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 5,
kDuration0, !kEncrypted),
// Audio is terminated along with video below.
IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
kDuration0 * 2, kIsSubsegment, !kEncrypted),
// The subsegment has duration kDuration1 * 2 since it can only
// terminate before key frame.
IsSegmentInfo(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
kDuration1 * 2, kIsSubsegment, !kEncrypted),
IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 5,
kDuration1, !kEncrypted)));

ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex0));
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 6,
kDuration0, !kEncrypted),
IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
kDuration0 * 3, !kIsSubsegment, !kEncrypted)));

ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex1));
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsSegmentInfo(
kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
kDuration1 * 3, !kIsSubsegment, !kEncrypted)));

// Flush again will do nothing.
ClearOutputStreamDataVector();
ASSERT_OK(OnFlushRequest(kStreamIndex0));
ASSERT_OK(OnFlushRequest(kStreamIndex1));
EXPECT_THAT(GetOutputStreamDataVector(), IsEmpty());
}

TEST_F(ChunkingHandlerTest, Scte35Event) {
ChunkingParams chunking_params;
chunking_params.segment_duration_in_seconds = 1;
chunking_params.subsegment_duration_in_seconds = 0.5;
SetUpChunkingHandler(1, chunking_params);

ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale1))));
kStreamIndex, GetVideoStreamInfo(kTimeScale1))));

const int64_t kVideoStartTimestamp = 12345;
const double kScte35TimeInSeconds =
const double kCueTimeInSeconds =
static_cast<double>(kVideoStartTimestamp + kDuration1) / kTimeScale1;
static_cast<double>(kVideoStartTimestamp + kDuration) / kTimeScale1;

auto scte35_event = std::make_shared<Scte35Event>();
auto cue_event = std::make_shared<CueEvent>();
scte35_event->start_time_in_seconds = kScte35TimeInSeconds;
cue_event->time_in_seconds = kCueTimeInSeconds;
ASSERT_OK(Process(StreamData::FromScte35Event(kStreamIndex0, scte35_event)));

for (int i = 0; i < 3; ++i) {
const bool is_key_frame = true;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
kStreamIndex, GetMediaSample(kVideoStartTimestamp + i * kDuration,
kDuration1, is_key_frame))));
kDuration, is_key_frame))));
if (i == 0) {
ASSERT_OK(Process(StreamData::FromCueEvent(kStreamIndex, cue_event)));
}
}

EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
IsStreamInfo(kStreamIndex0, kTimeScale1, !kEncrypted),
IsStreamInfo(kStreamIndex, kTimeScale1, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp, kDuration1,
IsMediaSample(kStreamIndex, kVideoStartTimestamp, kDuration,
!kEncrypted),
// A new segment is created due to the existance of Cue.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp, kDuration1,
IsSegmentInfo(kStreamIndex, kVideoStartTimestamp, kDuration,
!kIsSubsegment, !kEncrypted),
IsCueEvent(kStreamIndex0, kScte35TimeInSeconds),
IsCueEvent(kStreamIndex, kCueTimeInSeconds),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 1,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 1,
kDuration1, !kEncrypted),
kDuration, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,
IsMediaSample(kStreamIndex, kVideoStartTimestamp + kDuration * 2,
kDuration1, !kEncrypted)));
kDuration, !kEncrypted)));
}

} // namespace media
@ -20,7 +20,18 @@ double TimeInSeconds(const StreamInfo& info, const StreamData& data) {
switch (data.stream_data_type) {
case StreamDataType::kMediaSample:
time_scale = info.time_scale();
if (info.stream_type() == kStreamAudio) {
// Return the start time for video and mid-point for audio, so that for
// an audio sample, if the portion of the sample after the cue point is
// bigger than the portion of the sample before the cue point, the
// sample is placed after the cue.
// It does not matter for text samples as text samples will be cut at
// cue point.
scaled_time =
data.media_sample->pts() + data.media_sample->duration() / 2;
} else {
scaled_time = data.media_sample->pts();
}
break;
case StreamDataType::kTextSample:
// Text is always in MS but the stream info time scale is 0.
@ -80,7 +91,7 @@ Status CueAlignmentHandler::OnFlushRequest(size_t stream_index) {
if (!stream_state.samples.empty()) {
LOG(WARNING) << "Unexpected data seen on stream " << i;
while (!stream_state.samples.empty()) {
Status status(Dispatch(std::move(stream_state.samples.front())));
Status status = Dispatch(std::move(stream_state.samples.front()));
if (!status.ok())
return status;
stream_state.samples.pop_front();
@ -141,7 +152,7 @@ Status CueAlignmentHandler::OnSample(std::unique_ptr<StreamData> sample) {

// Accept the sample. This will output it if it comes before the hint point or
// will cache it if it comes after the hint point.
Status status(AcceptSample(std::move(sample), &stream_state));
Status status = AcceptSample(std::move(sample), &stream_state);
if (!status.ok()) {
return status;
}
@ -180,6 +191,8 @@ Status CueAlignmentHandler::UseNewSyncPoint(
// No stream should be so out of sync with the others that they would
// still be working on an old cue.
if (stream_state.cue) {
// TODO(kqyang): Could this happen for text when there are no text samples
// between the two cues?
LOG(ERROR) << "Found two cue events that are too close together. One at "
<< stream_state.cue->time_in_seconds << " and the other at "
<< new_sync->time_in_seconds;
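The audio mid-point rule added above can be read as: a sample belongs to whichever side of the cue holds the larger share of it. A self-contained sketch of that comparison (hypothetical helper, not the handler's actual code):

// Sketch: decide whether an audio sample should be placed after a cue point.
// All times are in the same timescale units.
bool AudioSampleGoesAfterCue(int64_t pts, int64_t duration, int64_t cue_time) {
  const int64_t midpoint = pts + duration / 2;
  // If the midpoint is at or past the cue, more of the sample lies after the
  // cue than before it, so it belongs to the post-cue chunk.
  return midpoint >= cue_time;
}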
@ -33,6 +33,7 @@
|
||||||
#include "packager/media/base/muxer_options.h"
|
#include "packager/media/base/muxer_options.h"
|
||||||
#include "packager/media/base/muxer_util.h"
|
#include "packager/media/base/muxer_util.h"
|
||||||
#include "packager/media/chunking/chunking_handler.h"
|
#include "packager/media/chunking/chunking_handler.h"
|
||||||
|
#include "packager/media/chunking/cue_alignment_handler.h"
|
||||||
#include "packager/media/crypto/encryption_handler.h"
|
#include "packager/media/crypto/encryption_handler.h"
|
||||||
#include "packager/media/demuxer/demuxer.h"
|
#include "packager/media/demuxer/demuxer.h"
|
||||||
#include "packager/media/event/muxer_listener_factory.h"
|
#include "packager/media/event/muxer_listener_factory.h"
|
||||||
|
@ -55,6 +56,7 @@ namespace shaka {
|
||||||
using media::Demuxer;
|
using media::Demuxer;
|
||||||
using media::KeySource;
|
using media::KeySource;
|
||||||
using media::MuxerOptions;
|
using media::MuxerOptions;
|
||||||
|
using media::SyncPointQueue;
|
||||||
|
|
||||||
namespace media {
|
namespace media {
|
||||||
namespace {
|
namespace {
|
||||||
|
@ -375,8 +377,15 @@ std::shared_ptr<MediaHandler> CreateEncryptionHandler(
|
||||||
Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
|
Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
|
||||||
const PackagingParams& packaging_params,
|
const PackagingParams& packaging_params,
|
||||||
std::unique_ptr<MuxerListener> muxer_listener,
|
std::unique_ptr<MuxerListener> muxer_listener,
|
||||||
|
SyncPointQueue* sync_points,
|
||||||
MuxerFactory* muxer_factory,
|
MuxerFactory* muxer_factory,
|
||||||
std::shared_ptr<OriginHandler>* root) {
|
std::shared_ptr<OriginHandler>* root) {
|
||||||
|
// TODO(kqyang): This need to be integrated back to media pipeline since we
|
||||||
|
// may want to get not only text streams from the demuxer, in which case, the
|
||||||
|
// same demuxer should be used to get all streams instead of having a demuxer
|
||||||
|
// specifically for text.
|
||||||
|
// TODO(kqyang): Support Cue Alignment if |sync_points| is not null.
|
||||||
|
|
||||||
Status status;
|
Status status;
|
||||||
std::shared_ptr<Demuxer> demuxer;
|
std::shared_ptr<Demuxer> demuxer;
|
||||||
|
|
||||||
|
@ -385,14 +394,15 @@ Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
|
||||||
demuxer->SetLanguageOverride(stream.stream_selector, stream.language);
|
demuxer->SetLanguageOverride(stream.stream_selector, stream.language);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<MediaHandler> chunker(
|
auto chunker =
|
||||||
new ChunkingHandler(packaging_params.chunking_params));
|
std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
|
||||||
std::shared_ptr<Muxer> muxer =
|
std::shared_ptr<Muxer> muxer =
|
||||||
muxer_factory->CreateMuxer(GetOutputFormat(stream), stream);
|
muxer_factory->CreateMuxer(GetOutputFormat(stream), stream);
|
||||||
muxer->SetMuxerListener(std::move(muxer_listener));
|
muxer->SetMuxerListener(std::move(muxer_listener));
|
||||||
|
|
||||||
status.Update(chunker->AddHandler(std::move(muxer)));
|
status.Update(chunker->AddHandler(std::move(muxer)));
|
||||||
status.Update(demuxer->SetHandler(stream.stream_selector, chunker));
|
status.Update(
|
||||||
|
demuxer->SetHandler(stream.stream_selector, std::move(chunker)));
|
||||||
|
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
@ -400,7 +410,10 @@ Status CreateMp4ToMp4TextJob(const StreamDescriptor& stream,
|
||||||
Status CreateHlsTextJob(const StreamDescriptor& stream,
|
Status CreateHlsTextJob(const StreamDescriptor& stream,
|
||||||
const PackagingParams& packaging_params,
|
const PackagingParams& packaging_params,
|
||||||
std::unique_ptr<MuxerListener> muxer_listener,
|
std::unique_ptr<MuxerListener> muxer_listener,
|
||||||
|
SyncPointQueue* sync_points,
|
||||||
JobManager* job_manager) {
|
JobManager* job_manager) {
|
||||||
|
// TODO(kqyang): Support Cue Alignment if |sync_points| is not null.
|
||||||
|
|
||||||
DCHECK(muxer_listener);
|
DCHECK(muxer_listener);
|
||||||
DCHECK(job_manager);
|
DCHECK(job_manager);
|
||||||
|
|
||||||
|
@ -421,9 +434,8 @@ Status CreateHlsTextJob(const StreamDescriptor& stream,
|
||||||
MuxerOptions muxer_options = CreateMuxerOptions(stream, packaging_params);
|
MuxerOptions muxer_options = CreateMuxerOptions(stream, packaging_params);
|
||||||
muxer_options.bandwidth = stream.bandwidth ? stream.bandwidth : 256;
|
muxer_options.bandwidth = stream.bandwidth ? stream.bandwidth : 256;
|
||||||
|
|
||||||
std::shared_ptr<WebVttSegmentedOutputHandler> output(
|
auto output = std::make_shared<WebVttSegmentedOutputHandler>(
|
||||||
new WebVttSegmentedOutputHandler(muxer_options,
|
muxer_options, std::move(muxer_listener));
|
||||||
std::move(muxer_listener)));
|
|
||||||
|
|
||||||
std::unique_ptr<FileReader> reader;
|
std::unique_ptr<FileReader> reader;
|
||||||
Status open_status = FileReader::Open(stream.input, &reader);
|
Status open_status = FileReader::Open(stream.input, &reader);
|
||||||
|
@ -431,10 +443,9 @@ Status CreateHlsTextJob(const StreamDescriptor& stream,
|
||||||
return open_status;
|
return open_status;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<OriginHandler> parser(
|
auto parser =
|
||||||
new WebVttParser(std::move(reader), stream.language));
|
std::make_shared<WebVttParser>(std::move(reader), stream.language);
|
||||||
std::shared_ptr<MediaHandler> segmenter(
|
auto segmenter = std::make_shared<WebVttSegmenter>(segment_length_in_ms);
|
||||||
new WebVttSegmenter(segment_length_in_ms));
|
|
||||||
|
|
||||||
// Build in reverse to allow us to move the pointers.
|
// Build in reverse to allow us to move the pointers.
|
||||||
Status status;
|
Status status;
|
||||||
|
@ -451,8 +462,11 @@ Status CreateHlsTextJob(const StreamDescriptor& stream,
|
||||||
Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
|
Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
|
||||||
const PackagingParams& packaging_params,
|
const PackagingParams& packaging_params,
|
||||||
std::unique_ptr<MuxerListener> muxer_listener,
|
std::unique_ptr<MuxerListener> muxer_listener,
|
||||||
|
SyncPointQueue* sync_points,
|
||||||
MuxerFactory* muxer_factory,
|
MuxerFactory* muxer_factory,
|
||||||
std::shared_ptr<OriginHandler>* root) {
|
std::shared_ptr<OriginHandler>* root) {
|
||||||
|
// TODO(kqyang): Support Cue Alignment if |sync_points| is not null.
|
||||||
|
|
||||||
Status status;
|
Status status;
|
||||||
std::unique_ptr<FileReader> reader;
|
std::unique_ptr<FileReader> reader;
|
||||||
status = FileReader::Open(stream.input, &reader);
|
status = FileReader::Open(stream.input, &reader);
|
||||||
|
@ -461,11 +475,11 @@ Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
|
||||||
return status;
|
return status;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::shared_ptr<OriginHandler> parser(
|
auto parser =
|
||||||
new WebVttParser(std::move(reader), stream.language));
|
std::make_shared<WebVttParser>(std::move(reader), stream.language);
|
||||||
std::shared_ptr<MediaHandler> text_to_mp4(new WebVttToMp4Handler);
|
auto text_to_mp4 = std::make_shared<WebVttToMp4Handler>();
|
||||||
std::shared_ptr<MediaHandler> chunker(
|
auto chunker =
|
||||||
new ChunkingHandler(packaging_params.chunking_params));
|
std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
|
||||||
std::shared_ptr<Muxer> muxer =
|
std::shared_ptr<Muxer> muxer =
|
||||||
muxer_factory->CreateMuxer(GetOutputFormat(stream), stream);
|
muxer_factory->CreateMuxer(GetOutputFormat(stream), stream);
|
||||||
muxer->SetMuxerListener(std::move(muxer_listener));
|
muxer->SetMuxerListener(std::move(muxer_listener));
|
||||||
@@ -482,6 +496,7 @@ Status CreateWebVttToMp4TextJob(const StreamDescriptor& stream,
 Status CreateTextJobs(
     const std::vector<std::reference_wrapper<const StreamDescriptor>>& streams,
     const PackagingParams& packaging_params,
+    SyncPointQueue* sync_points,
     MuxerListenerFactory* muxer_listener_factory,
     MuxerFactory* muxer_factory,
     MpdNotifier* mpd_notifier,
@@ -498,14 +513,14 @@ Status CreateTextJobs(
 
     switch (DetermineContainerFromFileName(stream.input)) {
       case CONTAINER_WEBVTT:
-        status.Update(CreateWebVttToMp4TextJob(stream, packaging_params,
-                                               std::move(muxer_listener),
-                                               muxer_factory, &root));
+        status.Update(CreateWebVttToMp4TextJob(
+            stream, packaging_params, std::move(muxer_listener), sync_points,
+            muxer_factory, &root));
         break;
 
       case CONTAINER_MOV:
-        status.Update(CreateMp4ToMp4TextJob(stream, packaging_params,
-                                            std::move(muxer_listener),
-                                            muxer_factory, &root));
+        status.Update(CreateMp4ToMp4TextJob(
+            stream, packaging_params, std::move(muxer_listener), sync_points,
+            muxer_factory, &root));
         break;
 
@@ -543,8 +558,9 @@ Status CreateTextJobs(
       // If we are outputting to HLS, then create the HLS test pipeline that
       // will create segmented text output.
       if (hls_listener) {
-        Status status = CreateHlsTextJob(stream, packaging_params,
-                                         std::move(hls_listener), job_manager);
+        Status status =
+            CreateHlsTextJob(stream, packaging_params, std::move(hls_listener),
+                             sync_points, job_manager);
         if (!status.ok()) {
           return status;
         }
@@ -592,6 +608,7 @@ Status CreateAudioVideoJobs(
     const std::vector<std::reference_wrapper<const StreamDescriptor>>& streams,
     const PackagingParams& packaging_params,
     KeySource* encryption_key_source,
+    SyncPointQueue* sync_points,
     MuxerListenerFactory* muxer_listener_factory,
     MuxerFactory* muxer_factory,
     JobManager* job_manager) {
@@ -601,14 +618,14 @@ Status CreateAudioVideoJobs(
 
   // Demuxers are shared among all streams with the same input.
   std::shared_ptr<Demuxer> demuxer;
-  // Chunkers can be shared among all streams with the same input (except for
-  // WVM files), which allows samples from the same input to be synced when
-  // doing chunking.
-  std::shared_ptr<MediaHandler> chunker;
-  bool is_wvm_file = false;
+  // When |sync_points| is not null, there should be one CueAlignmentHandler
+  // per input. All CueAlignmentHandlers share the same |sync_points|, which
+  // allows sync points / cues to be aligned across streams, whether they are
+  // from the same input or not.
+  std::shared_ptr<CueAlignmentHandler> cue_aligner;
   // Replicators are shared among all streams with the same input and stream
   // selector.
-  std::shared_ptr<MediaHandler> replicator;
+  std::shared_ptr<Replicator> replicator;
 
   std::string previous_input;
   std::string previous_selector;
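Per the new comment, each input gets its own CueAlignmentHandler while every handler observes the same |sync_points| queue, so cues are aligned across all streams of a packaging run. A rough sketch of that ownership shape, with simplified stand-in types instead of the real packager classes:

#include <map>
#include <memory>
#include <string>

// Stand-ins for the real SyncPointQueue / CueAlignmentHandler types.
struct SyncPointQueue {};
struct CueAlignmentHandler {
  explicit CueAlignmentHandler(SyncPointQueue* sync_points)
      : sync_points(sync_points) {}
  SyncPointQueue* sync_points;  // Not owned; shared by all handlers.
};

int main() {
  SyncPointQueue sync_points;  // One queue for the whole packaging session.
  std::map<std::string, std::shared_ptr<CueAlignmentHandler>> aligner_by_input;
  for (const std::string& input : {"audio.mp4", "video.mp4"}) {
    // One aligner per input, all observing the same queue.
    aligner_by_input[input] =
        std::make_shared<CueAlignmentHandler>(&sync_points);
  }
  return 0;
}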
@@ -624,15 +641,8 @@ Status CreateAudioVideoJobs(
 
       job_manager->Add("RemuxJob", demuxer);
 
-      // Share chunkers among all streams with the same input except for WVM
-      // file, which may contain multiple video files and the samples may not be
-      // interleaved either.
-      is_wvm_file =
-          DetermineContainerFromFileName(stream.input) == CONTAINER_WVM;
-      if (!is_wvm_file) {
-        chunker =
-            std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
-      }
+      if (sync_points)
+        cue_aligner = std::make_shared<CueAlignmentHandler>(sync_points);
     }
 
     if (!stream.language.empty()) {
@@ -651,16 +661,8 @@ Status CreateAudioVideoJobs(
     }
 
     if (new_stream) {
-      std::shared_ptr<MediaHandler> ad_cue_generator;
-      if (!packaging_params.ad_cue_generator_params.cue_points.empty()) {
-        ad_cue_generator = std::make_shared<AdCueGenerator>(
-            packaging_params.ad_cue_generator_params);
-      }
-
-      if (is_wvm_file) {
-        chunker =
-            std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
-      }
+      auto chunker =
+          std::make_shared<ChunkingHandler>(packaging_params.chunking_params);
 
       std::shared_ptr<MediaHandler> encryptor = CreateEncryptionHandler(
           packaging_params, stream, encryption_key_source);
@@ -668,10 +670,9 @@
         replicator = std::make_shared<Replicator>();
 
       Status status;
-      if (ad_cue_generator) {
-        status.Update(
-            demuxer->SetHandler(stream.stream_selector, ad_cue_generator));
-        status.Update(ad_cue_generator->AddHandler(chunker));
+      if (cue_aligner) {
+        status.Update(demuxer->SetHandler(stream.stream_selector, cue_aligner));
+        status.Update(cue_aligner->AddHandler(chunker));
       } else {
         status.Update(demuxer->SetHandler(stream.stream_selector, chunker));
       }
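The wiring above yields demuxer -> CueAlignmentHandler -> ChunkingHandler when cue alignment is active, and demuxer -> ChunkingHandler otherwise. An illustrative sketch of that branch, with stand-in Demuxer and MediaHandler types whose SetHandler/AddHandler only mimic the real connection calls:

#include <memory>

// Simplified stand-ins; not the real packager handler interfaces.
struct MediaHandler {
  virtual ~MediaHandler() = default;
  std::shared_ptr<MediaHandler> next;
  void AddHandler(std::shared_ptr<MediaHandler> handler) { next = handler; }
};
struct Demuxer {
  std::shared_ptr<MediaHandler> stream_handler;
  void SetHandler(const char* /*stream_selector*/,
                  std::shared_ptr<MediaHandler> handler) {
    stream_handler = handler;
  }
};

int main() {
  Demuxer demuxer;
  auto cue_aligner = std::make_shared<MediaHandler>();
  auto chunker = std::make_shared<MediaHandler>();
  const bool have_cues = true;  // i.e. |sync_points| != nullptr
  if (have_cues) {
    // demuxer -> cue_aligner -> chunker
    demuxer.SetHandler("video", cue_aligner);
    cue_aligner->AddHandler(chunker);
  } else {
    // demuxer -> chunker
    demuxer.SetHandler("video", chunker);
  }
  return 0;
}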
@@ -729,6 +730,7 @@ Status CreateAllJobs(const std::vector<StreamDescriptor>& stream_descriptors,
                      const PackagingParams& packaging_params,
                      MpdNotifier* mpd_notifier,
                      KeySource* encryption_key_source,
+                     SyncPointQueue* sync_points,
                      MuxerListenerFactory* muxer_listener_factory,
                      MuxerFactory* muxer_factory,
                      JobManager* job_manager) {
@@ -758,11 +760,11 @@ Status CreateAllJobs(const std::vector<StreamDescriptor>& stream_descriptors,
                    media::StreamDescriptorCompareFn);
 
   Status status;
-  status.Update(CreateTextJobs(text_streams, packaging_params,
+  status.Update(CreateTextJobs(text_streams, packaging_params, sync_points,
                                muxer_listener_factory, muxer_factory,
                                mpd_notifier, job_manager));
   status.Update(CreateAudioVideoJobs(
-      audio_video_streams, packaging_params, encryption_key_source,
+      audio_video_streams, packaging_params, encryption_key_source, sync_points,
       muxer_listener_factory, muxer_factory, job_manager));
 
   if (!status.ok()) {
@@ -783,6 +785,7 @@ struct Packager::PackagerInternal {
   std::unique_ptr<KeySource> encryption_key_source;
   std::unique_ptr<MpdNotifier> mpd_notifier;
   std::unique_ptr<hls::HlsNotifier> hls_notifier;
+  std::unique_ptr<SyncPointQueue> sync_points;
   BufferCallbackParams buffer_callback_params;
   media::JobManager job_manager;
 };
@@ -854,6 +857,11 @@ Status Packager::Initialize(
     internal->hls_notifier.reset(new hls::SimpleHlsNotifier(hls_params));
   }
 
+  if (!packaging_params.ad_cue_generator_params.cue_points.empty()) {
+    internal->sync_points.reset(
+        new SyncPointQueue(packaging_params.ad_cue_generator_params));
+  }
+
   std::vector<StreamDescriptor> streams_for_jobs;
 
   for (const StreamDescriptor& descriptor : stream_descriptors) {
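As the added block shows, the shared SyncPointQueue is only created when at least one ad cue point was requested; otherwise |sync_points| stays null and no CueAlignmentHandler is inserted. A hedged sketch of that guard, with simplified stand-ins for AdCueGeneratorParams and SyncPointQueue:

#include <memory>
#include <vector>

// Simplified stand-ins for the real packager parameter/queue types.
struct Cuepoint { double start_time_in_seconds; };
struct AdCueGeneratorParams { std::vector<Cuepoint> cue_points; };
struct SyncPointQueue {
  explicit SyncPointQueue(const AdCueGeneratorParams& params)
      : params(params) {}
  AdCueGeneratorParams params;
};

int main() {
  AdCueGeneratorParams ad_params;
  ad_params.cue_points.push_back({1.5});  // e.g. an ad cue at 1.5 seconds

  std::unique_ptr<SyncPointQueue> sync_points;
  if (!ad_params.cue_points.empty()) {
    // Only allocate the queue when there is at least one cue point.
    sync_points.reset(new SyncPointQueue(ad_params));
  }
  return sync_points ? 0 : 1;
}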
@@ -896,8 +904,8 @@ Status Packager::Initialize(
 
   Status status = media::CreateAllJobs(
       streams_for_jobs, packaging_params, internal->mpd_notifier.get(),
-      internal->encryption_key_source.get(), &muxer_listener_factory,
-      &muxer_factory, &internal->job_manager);
+      internal->encryption_key_source.get(), internal->sync_points.get(),
+      &muxer_listener_factory, &muxer_factory, &internal->job_manager);
 
   if (!status.ok()) {
     return status;