[DASH] Fix TTML text input passthrough (regression)

Note that TTML in ISO-BMFF is not supported yet.

Also updated packager_test.py:
- Added a test using TTML passthrough.
- Computed output extension from input extension unless output_format
  is specified.

Fixes #478.

Change-Id: Ia917fc4ed3c326782791ed67601fba02ea28b11d
KongQun Yang 2018-09-18 13:34:18 -07:00
parent 5a912815c1
commit 42083d205d
37 changed files with 209 additions and 249 deletions

View File

@ -215,14 +215,12 @@ def _UpdateMpdTimes(mpd_filepath):
f.write(content)
def GetExtension(stream_descriptor, output_format):
def GetExtension(input_file_path, output_format):
if output_format:
return output_format
# TODO(rkuroiwa): Support ttml.
if stream_descriptor == 'text':
return 'vtt'
# Default to mp4.
return 'mp4'
# Otherwise use the same extension as the input.
ext = os.path.splitext(input_file_path)[1]
return ext[1:] # Remove the leading '.'.
def GetSegmentedExtension(base_extension):
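For reference, a minimal sketch of what the new helper does for the inputs used in these tests; the assertions are illustrative only (not part of packager_test.py), and output_format is assumed to be None when a stream does not specify one:

import os

def GetExtension(input_file_path, output_format):
  if output_format:
    return output_format
  # Otherwise use the same extension as the input.
  ext = os.path.splitext(input_file_path)[1]
  return ext[1:]  # Remove the leading '.'.

# TTML and WebVTT inputs now keep their extension on passthrough:
assert GetExtension('bear-english.ttml', None) == 'ttml'
assert GetExtension('bear-english.vtt', None) == 'vtt'
# An explicit output_format still overrides, e.g. for the TS-to-MP4 tests:
assert GetExtension('bear-640x360-ac3.ts', 'mp4') == 'mp4'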
@ -336,7 +334,7 @@ class PackagerAppTest(unittest.TestCase):
if skip_encryption:
stream.Append('skip_encryption', 1)
base_ext = GetExtension(descriptor, output_format)
base_ext = GetExtension(input_file_path, output_format)
output_file_name_base = stream.GetOutputFileNameBase(output_file_prefix)
if hls:
@ -516,13 +514,10 @@ class PackagerAppTest(unittest.TestCase):
self.assertIn('Found 1 stream(s).', stream_info)
self.assertIn(info, stream_info)
def _Decrypt(self, file_path, output_format):
def _Decrypt(self, file_path):
streams = [
self._GetStream(
'0',
output_file_prefix='decrypted',
output_format=output_format,
test_file=file_path)
'0', output_file_prefix='decrypted', test_file=file_path)
]
self.assertPackageSuccess(streams, self._GetFlags(decryption=True))
@ -560,7 +555,7 @@ class PackagerAppTest(unittest.TestCase):
continue
extension = os.path.splitext(file_name)[1][1:]
if extension not in ['mpd', 'm3u8', 'media_info']:
self._Decrypt(os.path.join(self.tmp_dir, file_name), extension)
self._Decrypt(os.path.join(self.tmp_dir, file_name))
out_dir = self.tmp_dir
gold_dir = os.path.join(self.golden_file_dir, test_dir)
@ -628,12 +623,6 @@ class PackagerFunctionalTest(PackagerAppTest):
self._GetStreams(['0']), self._GetFlags(output_dash=True))
self._CheckTestResults('first-stream')
def testText(self):
self.assertPackageSuccess(
self._GetStreams(['text'], test_files=['subtitle-english.vtt']),
self._GetFlags(output_dash=True))
self._CheckTestResults('text')
# Probably one of the most common scenarios is to package audio and video.
def testAudioVideo(self):
self.assertPackageSuccess(
@ -715,14 +704,19 @@ class PackagerFunctionalTest(PackagerAppTest):
self._GetFlags(output_dash=True))
self._CheckTestResults('acc-he')
# Package all video, audio, and text.
def testVideoAudioText(self):
def testVideoAudioWebVTT(self):
audio_video_streams = self._GetStreams(['audio', 'video'])
text_stream = self._GetStreams(['text'],
test_files=['subtitle-english.vtt'])
text_stream = self._GetStreams(['text'], test_files=['bear-english.vtt'])
self.assertPackageSuccess(audio_video_streams + text_stream,
self._GetFlags(output_dash=True))
self._CheckTestResults('video-audio-text')
self._CheckTestResults('video-audio-webvtt')
def testVideoAudioTTML(self):
audio_video_streams = self._GetStreams(['audio', 'video'])
text_stream = self._GetStreams(['text'], test_files=['bear-english.ttml'])
self.assertPackageSuccess(audio_video_streams + text_stream,
self._GetFlags(output_dash=True))
self._CheckTestResults('video-audio-ttml')
def testVideoNoEditList(self):
stream = self._GetStream('video', test_file='bear-640x360-no_edit_list.mp4')
@ -732,42 +726,38 @@ class PackagerFunctionalTest(PackagerAppTest):
def testAvcAacTs(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(output_dash=True, output_hls=True))
self._CheckTestResults('avc-aac-ts')
def testAvcAc3Ts(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360-ac3.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360-ac3.ts']),
self._GetFlags(output_hls=True))
self._CheckTestResults('avc-ac3-ts')
def testAvcAc3TsToMp4(self):
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'], hls=True, test_files=['bear-640x360-ac3.ts']),
self._GetStreams(['audio', 'video'],
output_format='mp4',
hls=True,
test_files=['bear-640x360-ac3.ts']),
self._GetFlags(output_hls=True))
self._CheckTestResults('avc-ac3-ts-to-mp4')
def testAvcTsLivePlaylist(self):
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(
output_hls=True,
hls_playlist_type='LIVE',
@ -776,12 +766,10 @@ class PackagerFunctionalTest(PackagerAppTest):
def testAvcTsLivePlaylistWithKeyRotation(self):
self.packager.Package(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(
encryption=True,
key_rotation=True,
@ -792,12 +780,10 @@ class PackagerFunctionalTest(PackagerAppTest):
def testAvcTsEventPlaylist(self):
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(
output_hls=True,
hls_playlist_type='EVENT',
@ -824,43 +810,34 @@ class PackagerFunctionalTest(PackagerAppTest):
def testVp8Webm(self):
self.assertPackageSuccess(
self._GetStreams(['video'],
output_format='webm',
test_files=['bear-640x360.webm']),
self._GetStreams(['video'], test_files=['bear-640x360.webm']),
self._GetFlags(output_dash=True))
self._CheckTestResults('vp8-webm')
def testVp9Webm(self):
self.assertPackageSuccess(
self._GetStreams(['audio', 'video'],
output_format='webm',
test_files=['bear-320x240-vp9-opus.webm']),
self._GetFlags(output_dash=True))
self._CheckTestResults('vp9-webm')
def testVp9WebmWithBlockgroup(self):
self.assertPackageSuccess(
self._GetStreams(['video'],
output_format='webm',
test_files=['bear-vp9-blockgroup.webm']),
self._GetStreams(['video'], test_files=['bear-vp9-blockgroup.webm']),
self._GetFlags(output_dash=True))
self._CheckTestResults('vp9-webm-with-blockgroup')
def testVorbisWebm(self):
self.assertPackageSuccess(
self._GetStreams(['audio'],
output_format='webm',
test_files=['bear-320x240-audio-only.webm']),
self._GetFlags(output_dash=True))
self._CheckTestResults('vorbis-webm')
def testAv1Mp4(self):
self.assertPackageSuccess(
self._GetStreams(['video'],
output_format='mp4',
test_files=['bear-av1.mp4']),
self._GetFlags(output_dash=True, output_hls=True)
)
self._GetStreams(['video'], test_files=['bear-av1.mp4']),
self._GetFlags(output_dash=True, output_hls=True))
self._CheckTestResults('av1-mp4')
def testAv1Mp4ToWebM(self):
@ -868,8 +845,7 @@ class PackagerFunctionalTest(PackagerAppTest):
self._GetStreams(['video'],
output_format='webm',
test_files=['bear-av1.mp4']),
self._GetFlags(output_dash=True, output_hls=True)
)
self._GetFlags(output_dash=True, output_hls=True))
self._CheckTestResults('av1-mp4-to-webm')
def testAv1WebM(self):
@ -877,8 +853,7 @@ class PackagerFunctionalTest(PackagerAppTest):
self._GetStreams(['video'],
output_format='mp4',
test_files=['bear-av1.webm']),
self._GetFlags(output_dash=True, output_hls=True)
)
self._GetFlags(output_dash=True, output_hls=True))
self._CheckTestResults('av1-webm')
def testEncryption(self):
@ -1029,16 +1004,10 @@ class PackagerFunctionalTest(PackagerAppTest):
def testHlsAudioVideoTextWithAdCues(self):
streams = [
self._GetStream('audio',
hls=True,
segmented=True),
self._GetStream('video',
hls=True,
segmented=True),
self._GetStream('text',
hls=True,
segmented=True,
test_file='bear-subtitle-english.vtt')
self._GetStream('audio', hls=True, segmented=True),
self._GetStream('video', hls=True, segmented=True),
self._GetStream(
'text', hls=True, segmented=True, test_file='bear-english.vtt')
]
flags = self._GetFlags(output_hls=True, ad_cues='1.5')
self.assertPackageSuccess(streams, flags)
@ -1046,17 +1015,14 @@ class PackagerFunctionalTest(PackagerAppTest):
def testVttTextToMp4WithAdCues(self):
streams = [
self._GetStream('audio',
hls=True,
segmented=True),
self._GetStream('video',
hls=True,
segmented=True),
self._GetStream('text',
hls=True,
segmented=True,
test_file='bear-subtitle-english.vtt',
output_format='mp4')
self._GetStream('audio', hls=True, segmented=True),
self._GetStream('video', hls=True, segmented=True),
self._GetStream(
'text',
hls=True,
segmented=True,
test_file='bear-english.vtt',
output_format='mp4')
]
flags = self._GetFlags(output_dash=True, output_hls=True,
generate_static_mpd=True, ad_cues='1.5')
@ -1072,9 +1038,7 @@ class PackagerFunctionalTest(PackagerAppTest):
def testWebmSubsampleEncryption(self):
streams = [
self._GetStream('video',
output_format='webm',
test_file='bear-320x180-vp9-altref.webm')
self._GetStream('video', test_file='bear-320x180-vp9-altref.webm')
]
self.assertPackageSuccess(streams,
self._GetFlags(encryption=True, output_dash=True))
@ -1082,9 +1046,7 @@ class PackagerFunctionalTest(PackagerAppTest):
def testWebmVp9FullSampleEncryption(self):
streams = [
self._GetStream('video',
output_format='webm',
test_file='bear-320x180-vp9-altref.webm')
self._GetStream('video', test_file='bear-320x180-vp9-altref.webm')
]
flags = self._GetFlags(
encryption=True, vp9_subsample_encryption=False, output_dash=True)
@ -1096,12 +1058,10 @@ class PackagerFunctionalTest(PackagerAppTest):
def testAvcTsWithEncryption(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(encryption=True, output_hls=True))
self._CheckTestResults('avc-ts-with-encryption')
@ -1115,11 +1075,7 @@ class PackagerFunctionalTest(PackagerAppTest):
hls=True,
test_file='bear-640x360.ts'),
self._GetStream(
'video',
output_format='ts',
segmented=True,
hls=True,
test_file='bear-640x360.ts')
'video', segmented=True, hls=True, test_file='bear-640x360.ts')
]
flags = self._GetFlags(encryption=True, output_hls=True)
@ -1129,24 +1085,20 @@ class PackagerFunctionalTest(PackagerAppTest):
def testAvcTsWithEncryptionAndFairPlay(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360.ts']),
self._GetFlags(encryption=True, output_hls=True, fairplay=True))
self._CheckTestResults('avc-ts-with-encryption-and-fairplay')
def testAvcAc3TsWithEncryption(self):
# Currently we only support live packaging for ts.
self.assertPackageSuccess(
self._GetStreams(
['audio', 'video'],
output_format='ts',
segmented=True,
hls=True,
test_files=['bear-640x360-ac3.ts']),
self._GetStreams(['audio', 'video'],
segmented=True,
hls=True,
test_files=['bear-640x360-ac3.ts']),
self._GetFlags(encryption=True, output_hls=True))
self._CheckTestResults('avc-ac3-ts-with-encryption')
@ -1160,11 +1112,7 @@ class PackagerFunctionalTest(PackagerAppTest):
hls=True,
test_file='bear-640x360-ac3.ts'),
self._GetStream(
'video',
output_format='ts',
segmented=True,
hls=True,
test_file='bear-640x360-ac3.ts')
'video', segmented=True, hls=True, test_file='bear-640x360-ac3.ts')
]
flags = self._GetFlags(encryption=True, output_hls=True)
@ -1188,11 +1136,7 @@ class PackagerFunctionalTest(PackagerAppTest):
'avc-ts-with-encryption-exercise-emulation-prevention')
def testWebmWithEncryption(self):
streams = [
self._GetStream('video',
output_format='webm',
test_file='bear-640x360.webm')
]
streams = [self._GetStream('video', test_file='bear-640x360.webm')]
flags = self._GetFlags(encryption=True, output_dash=True)
self.assertPackageSuccess(streams, flags)
@ -1235,8 +1179,7 @@ class PackagerFunctionalTest(PackagerAppTest):
def testFlacWithEncryption(self):
streams = [
self._GetStream(
'audio', output_format='mp4', test_file='bear-flac.mp4'),
self._GetStream('audio', test_file='bear-flac.mp4'),
]
flags = self._GetFlags(encryption=True, output_dash=True, output_hls=True)
@ -1246,8 +1189,9 @@ class PackagerFunctionalTest(PackagerAppTest):
def testWvmInput(self):
self.encryption_key = '9248d245390e0a49d483ba9b43fc69c3'
self.assertPackageSuccess(
self._GetStreams(
['0', '1', '2', '3'], test_files=['bear-multi-configs.wvm']),
self._GetStreams(['0', '1', '2', '3'],
output_format='mp4',
test_files=['bear-multi-configs.wvm']),
self._GetFlags(decryption=True, output_dash=True))
# Output timescale is 90000.
self._CheckTestResults('wvm-input')
@ -1262,8 +1206,9 @@ class PackagerFunctionalTest(PackagerAppTest):
def testWvmInputWithoutStrippingParameterSetNalus(self):
self.encryption_key = '9248d245390e0a49d483ba9b43fc69c3'
self.assertPackageSuccess(
self._GetStreams(
['0', '1', '2', '3'], test_files=['bear-multi-configs.wvm']),
self._GetStreams(['0', '1', '2', '3'],
output_format='mp4',
test_files=['bear-multi-configs.wvm']),
self._GetFlags(
strip_parameter_set_nalus=False, decryption=True, output_dash=True))
# Output timescale is 90000.
@ -1389,12 +1334,9 @@ class PackagerFunctionalTest(PackagerAppTest):
self._CheckTestResults('live-profile')
def testLiveProfileWithWebM(self):
streams = self._GetStreams(
['audio', 'video'],
segmented=True,
output_format='webm',
test_file='bear-640x360.webm'
)
streams = self._GetStreams(['audio', 'video'],
segmented=True,
test_file='bear-640x360.webm')
flags = self._GetFlags(output_dash=True, output_hls=True)
self.assertPackageSuccess(streams, flags)
@ -1537,7 +1479,7 @@ class PackagerFunctionalTest(PackagerAppTest):
streams = self._GetStreams(
['audio', 'video'], output_format='ts', segmented=True)
streams += self._GetStreams(
['text'], test_files=['bear-subtitle-english.vtt'], segmented=True)
['text'], test_files=['bear-english.vtt'], segmented=True)
flags = self._GetFlags(output_hls=True)

View File

@ -4,16 +4,16 @@
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXTINF:1.000,
bear-subtitle-english-text-1.vtt
bear-english-text-1.vtt
#EXTINF:1.000,
bear-subtitle-english-text-2.vtt
bear-english-text-2.vtt
#EXTINF:0.001,
bear-subtitle-english-text-3.vtt
bear-english-text-3.vtt
#EXT-X-PLACEMENT-OPPORTUNITY
#EXTINF:1.000,
bear-subtitle-english-text-4.vtt
bear-english-text-4.vtt
#EXTINF:1.000,
bear-subtitle-english-text-5.vtt
bear-english-text-5.vtt
#EXTINF:1.000,
bear-subtitle-english-text-6.vtt
bear-english-text-6.vtt
#EXT-X-ENDLIST

View File

@ -3,7 +3,7 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="bear-640x360-audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_1",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-MEDIA:TYPE=SUBTITLES,URI="bear-subtitle-english-text.m3u8",GROUP-ID="default-text-group",NAME="stream_0",AUTOSELECT=YES
#EXT-X-MEDIA:TYPE=SUBTITLES,URI="bear-english-text.m3u8",GROUP-ID="default-text-group",NAME="stream_0",AUTOSELECT=YES
#EXT-X-STREAM-INF:BANDWIDTH=1108051,AVERAGE-BANDWIDTH=1005999,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group",SUBTITLES="default-text-group"
bear-640x360-video.m3u8

View File

@ -4,13 +4,13 @@
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXTINF:1.000,
bear-subtitle-english-text-1.vtt
bear-english-text-1.vtt
#EXTINF:1.000,
bear-subtitle-english-text-2.vtt
bear-english-text-2.vtt
#EXTINF:1.000,
bear-subtitle-english-text-3.vtt
bear-english-text-3.vtt
#EXTINF:1.000,
bear-subtitle-english-text-4.vtt
bear-english-text-4.vtt
#EXTINF:1.000,
bear-subtitle-english-text-5.vtt
bear-english-text-5.vtt
#EXT-X-ENDLIST

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en">
<body>
<div>
<p begin="0.1s" end="0.8s">Yup, that's a bear, eh.</p>
<p begin="1.0s" end="4.7s">He 's... um... doing bear-like stuff.</p>
</div>
</body>
</tt>

View File

@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.7360665798187256S">
<Period id="0">
<AdaptationSet id="0" contentType="text" subsegmentAlignment="true">
<Representation id="0" bandwidth="256" mimeType="application/ttml+xml">
<BaseURL>bear-english-text.ttml</BaseURL>
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
<Representation id="1" bandwidth="973483" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<BaseURL>bear-640x360-video.mp4</BaseURL>
<SegmentBase indexRange="859-926" timescale="30000">
<Initialization range="0-858"/>
</SegmentBase>
</Representation>
</AdaptationSet>
<AdaptationSet id="2" contentType="audio" subsegmentAlignment="true">
<Representation id="2" bandwidth="133334" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>bear-640x360-audio.mp4</BaseURL>
<SegmentBase indexRange="793-860" timescale="44100">
<Initialization range="0-792"/>
</SegmentBase>
</Representation>
</AdaptationSet>
</Period>
</MPD>

View File

@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.7360665798187256S">
<Period id="0">
<AdaptationSet id="0" contentType="text" subsegmentAlignment="true">
<Representation id="0" bandwidth="256" mimeType="text/vtt">
<BaseURL>bear-english-text.vtt</BaseURL>
</Representation>
</AdaptationSet>
<AdaptationSet id="1" contentType="video" width="640" height="360" frameRate="30000/1001" subsegmentAlignment="true" par="16:9">
<Representation id="1" bandwidth="973483" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
<BaseURL>bear-640x360-video.mp4</BaseURL>
<SegmentBase indexRange="859-926" timescale="30000">
<Initialization range="0-858"/>
</SegmentBase>
</Representation>
</AdaptationSet>
<AdaptationSet id="2" contentType="audio" subsegmentAlignment="true">
<Representation id="2" bandwidth="133334" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
<AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
<BaseURL>bear-640x360-audio.mp4</BaseURL>
<SegmentBase indexRange="793-860" timescale="44100">
<Initialization range="0-792"/>
</SegmentBase>
</Representation>
</AdaptationSet>
</Period>
</MPD>

View File

@ -3,18 +3,18 @@
## Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>
#EXT-X-TARGETDURATION:2
#EXT-X-PLAYLIST-TYPE:VOD
#EXT-X-MAP:URI="bear-subtitle-english-text-init.mp4"
#EXT-X-MAP:URI="bear-english-text-init.mp4"
#EXTINF:1.000,
bear-subtitle-english-text-1.m4s
bear-english-text-1.m4s
#EXTINF:1.000,
bear-subtitle-english-text-2.m4s
bear-english-text-2.m4s
#EXTINF:0.001,
bear-subtitle-english-text-3.m4s
bear-english-text-3.m4s
#EXT-X-PLACEMENT-OPPORTUNITY
#EXTINF:1.000,
bear-subtitle-english-text-4.m4s
bear-english-text-4.m4s
#EXTINF:1.000,
bear-subtitle-english-text-5.m4s
bear-english-text-5.m4s
#EXTINF:1.000,
bear-subtitle-english-text-6.m4s
bear-english-text-6.m4s
#EXT-X-ENDLIST

View File

@ -3,7 +3,7 @@
#EXT-X-MEDIA:TYPE=AUDIO,URI="bear-640x360-audio.m3u8",GROUP-ID="default-audio-group",NAME="stream_1",AUTOSELECT=YES,CHANNELS="2"
#EXT-X-MEDIA:TYPE=SUBTITLES,URI="bear-subtitle-english-text.m3u8",GROUP-ID="default-text-group",NAME="stream_0",AUTOSELECT=YES
#EXT-X-MEDIA:TYPE=SUBTITLES,URI="bear-english-text.m3u8",GROUP-ID="default-text-group",NAME="stream_0",AUTOSELECT=YES
#EXT-X-STREAM-INF:BANDWIDTH=1108051,AVERAGE-BANDWIDTH=1005999,CODECS="avc1.64001e,mp4a.40.2",RESOLUTION=640x360,AUDIO="default-audio-group",SUBTITLES="default-text-group"
bear-640x360-video.m3u8

View File

@ -5,7 +5,7 @@
<AdaptationSet id="0" contentType="text" segmentAlignment="true">
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="subtitle"/>
<Representation id="0" bandwidth="1896000" codecs="wvtt" mimeType="application/mp4">
<SegmentTemplate timescale="1000" initialization="bear-subtitle-english-text-init.mp4" media="bear-subtitle-english-text-$Number$.m4s" startNumber="1">
<SegmentTemplate timescale="1000" initialization="bear-english-text-init.mp4" media="bear-english-text-$Number$.m4s" startNumber="1">
<SegmentTimeline>
<S t="0" d="1000" r="1"/>
<S t="2000" d="1"/>
@ -38,7 +38,7 @@
<AdaptationSet id="0" contentType="text" segmentAlignment="true">
<Role schemeIdUri="urn:mpeg:dash:role:2011" value="subtitle"/>
<Representation id="0" bandwidth="2024" codecs="wvtt" mimeType="application/mp4">
<SegmentTemplate timescale="1000" presentationTimeOffset="2001" initialization="bear-subtitle-english-text-init.mp4" media="bear-subtitle-english-text-$Number$.m4s" startNumber="4">
<SegmentTemplate timescale="1000" presentationTimeOffset="2001" initialization="bear-english-text-init.mp4" media="bear-english-text-$Number$.m4s" startNumber="4">
<SegmentTimeline>
<S t="2001" d="1000" r="2"/>
</SegmentTimeline>

View File

@ -0,0 +1,9 @@
<?xml version="1.0" encoding="utf-8"?>
<tt xmlns="http://www.w3.org/ns/ttml" xml:lang="en">
<body>
<div>
<p begin="0.1s" end="0.8s">Yup, that's a bear, eh.</p>
<p begin="1.0s" end="4.7s">He 's... um... doing bear-like stuff.</p>
</div>
</body>
</tt>

View File

@ -0,0 +1,10 @@
WEBVTT

STYLE
::cue { color:lime }

00:00:00.000 --> 00:00:00.800
Yup, that's a bear, eh.

00:00:01.000 --> 00:00:04.700
He 's... um... doing bear-like stuff.

View File

@ -1,79 +0,0 @@
WEBVTT

1
00:00:03.837 --> 00:00:07.299
Captain's log, stardate 41636.9.

2
00:00:07.466 --> 00:00:11.845
As feared, our examination of the
overdue Federation freighter Odin,

3
00:00:12.012 --> 00:00:16.475
disabled by an asteroid collision,
revealed no life signs.

4
00:00:16.642 --> 00:00:19.019
However three escape pods
were missing,

5
00:00:19.186 --> 00:00:21.939
suggesting
the possibility of survivors.

6
00:00:22.606 --> 00:00:27.861
- Ready to orbit Angel One.
- What kind of place is this, Data?

7
00:00:28.028 --> 00:00:31.615
A Class-M planet supporting
carbon-based flora and fauna,

8
00:00:31.782 --> 00:00:34.326
sparsely populated
with intelligent life.

9
00:00:34.493 --> 00:00:38.497
Similar in technological development
to mid-20th century Earth.

10
00:00:38.664 --> 00:00:41.000
Kinda like being marooned at home.

11
00:00:41.166 --> 00:00:43.586
Assuming any survivors
made it this far.

12
00:00:43.794 --> 00:00:49.174
It is the closest planet, but to
go the distance we did in two days,

13
00:00:49.341 --> 00:00:52.344
would've taken the Odin escape pod
five months.

14
00:00:52.511 --> 00:00:54.680
Five months, six days, 11 hours,
two min...

15
00:00:54.847 --> 00:00:58.392
- Thank you, Data.
- ...and 57 seconds.

16
00:00:58.559 --> 00:01:01.353
Receiving an audio signal
from Angel One.

View File

@ -536,7 +536,8 @@ Status CreateTextJobs(
DCHECK(muxer_listener_factory);
DCHECK(job_manager);
for (const StreamDescriptor& stream : streams) {
// There are currently four options:
// There are currently these options:
// TEXT TTML --> TEXT TTML [ supported ], for DASH only.
// TEXT WEBVTT --> TEXT WEBVTT [ supported ]
// TEXT WEBVTT --> MP4 WEBVTT [ supported ]
// MP4 WEBVTT --> MP4 WEBVTT [ unsupported ]
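A hedged Python sketch of the validation the hunks below add; it is a conceptual restatement only (the real checks live in CreateTextJobs in C++ and use the CONTAINER_* enums and Status objects), with plain strings standing in for the container types:

def check_text_job(input_container, output_container, has_hls_output):
  # Only WebVTT and TTML text inputs are accepted at all.
  if input_container not in ('webvtt', 'ttml'):
    raise ValueError('Text output format is not supported for ' +
                     input_container)
  # TTML can only pass through as-is; TTML in MP4 is tracked in issue #87.
  if input_container == 'ttml' and output_container == 'mp4':
    raise ValueError('TTML in MP4 is not supported yet.')
  # TTML passthrough is DASH only.
  if input_container == 'ttml' and has_hls_output:
    raise ValueError('HLS does not support TTML in xml format.')

In other words, TTML inputs survive only as plain-text DASH passthrough, while WebVTT keeps both its text and MP4 (wvtt) output paths.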
@ -544,12 +545,20 @@ Status CreateTextJobs(
const auto input_container = DetermineContainerFromFileName(stream.input);
const auto output_container = GetOutputFormat(stream);
if (input_container != CONTAINER_WEBVTT) {
if (input_container != CONTAINER_WEBVTT &&
input_container != CONTAINER_TTML) {
return Status(error::INVALID_ARGUMENT,
"Text output format is not support for " + stream.input);
}
if (output_container == CONTAINER_MOV) {
if (input_container == CONTAINER_TTML) {
return Status(error::INVALID_ARGUMENT,
"TTML in MP4 is not supported yet. Please follow "
"https://github.com/google/shaka-packager/issues/87 for "
"the updates.");
}
std::unique_ptr<MuxerListener> muxer_listener =
muxer_listener_factory->CreateListener(ToMuxerListenerData(stream));
@ -566,6 +575,10 @@ Status CreateTextJobs(
// Check input to ensure that output is possible.
if (hls_listener) {
if (input_container == CONTAINER_TTML) {
return Status(error::INVALID_ARGUMENT,
"HLS does not support TTML in xml format.");
}
if (stream.segment_template.empty() || !stream.output.empty()) {
return Status(error::INVALID_ARGUMENT,
"segment_template needs to be specified for HLS text "