Fix incorrect segment name with $Time$ in segment_template

The time of the previous segment was used when generating the segment
name. This caused the first segment to be overwritten and the media
files to mismatch the manifest, leading to playback problems.

Issue #472.

Change-Id: Ia8130ce261585e1a2ede83b26de3e32508de087f
KongQun Yang 2018-09-12 14:47:20 -07:00
parent d0978b3937
commit 31e5f129b5
11 changed files with 49 additions and 9 deletions
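
Editorial note: in segmented DASH output, segment_template may contain a
$Number$ or a $Time$ identifier; $Time$ must be replaced with the start
timestamp (in timescale units) of the segment being written, and the same
values must appear as S@t entries in the manifest's SegmentTimeline. The
Python sketch below is illustrative only; the names (segment_name, template)
are hypothetical and this is not Shaka Packager code.

    # Illustrative sketch of $Time$ substitution; not Shaka Packager code.
    def segment_name(template, segment_start_time):
      # The name must use the start time of the segment being written.
      return template.replace('$Time$', str(segment_start_time))

    template = 'bear-640x360-video-$Time$.m4s'
    print(segment_name(template, 0))      # bear-640x360-video-0.m4s
    print(segment_name(template, 30030))  # bear-640x360-video-30030.m4s
    # The bug: reusing the previous segment's time would name the second
    # segment 'bear-640x360-video-0.m4s' as well, overwriting the first
    # segment and producing names the SegmentTimeline never references.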


@@ -271,6 +271,7 @@ class PackagerAppTest(unittest.TestCase):
output_file_prefix=None,
output_format=None,
segmented=False,
using_time_specifier=False,
hls=False,
trick_play_factor=None,
drm_label=None,
@@ -294,6 +295,8 @@ class PackagerAppTest(unittest.TestCase):
output_format: Specify the format for the output.
segmented: Should the output use a segmented format. This will affect
the output extensions and manifests.
using_time_specifier: Use $Time$ in segment name instead of using
$Number$. This flag is only relevant if segmented is True.
hls: Should the output be for an HLS manifest.
trick_play_factor: Signals the stream is to be used for a trick play
stream and which key frames to use. A trick play factor of 0 is the
@@ -358,8 +361,10 @@ class PackagerAppTest(unittest.TestCase):
      stream.Append('init_segment', init_seg)
    if segmented:
      segment_specifier = '$Time$' if using_time_specifier else '$Number$'
      segment_ext = GetSegmentedExtension(base_ext)
      seg_template = '%s-$Number$.%s' % (output_file_path, segment_ext)
      seg_template = '%s-%s.%s' % (output_file_path, segment_specifier,
                                   segment_ext)
      stream.Append('segment_template', seg_template)
    else:
      if split_content_on_ad_cues:
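
For illustration, assuming a hypothetical output_file_path of
'output/bear-640x360-video' and a segment_ext of 'm4s', the removed
expression and its replacement above produce:

    # Hypothetical values, for illustration only.
    output_file_path = 'output/bear-640x360-video'
    segment_ext = 'm4s'
    '%s-$Number$.%s' % (output_file_path, segment_ext)
    # -> 'output/bear-640x360-video-$Number$.m4s'
    '%s-%s.%s' % (output_file_path, '$Time$', segment_ext)
    # -> 'output/bear-640x360-video-$Time$.m4s'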
@@ -1401,6 +1406,14 @@ class PackagerFunctionalTest(PackagerAppTest):
        self._GetFlags(output_dash=True, generate_static_mpd=True))
    self._CheckTestResults('live-static-profile')

  def testLiveStaticProfileWithTimeInSegmentName(self):
    self.assertPackageSuccess(
        self._GetStreams(['audio', 'video'],
                         segmented=True,
                         using_time_specifier=True),
        self._GetFlags(output_dash=True, generate_static_mpd=True))
    self._CheckTestResults('live-static-profile-with-time-in-segment-name')

  def testLiveProfileAndEncryption(self):
    self.assertPackageSuccess(
        self._GetStreams(['audio', 'video'], segmented=True),


@@ -0,0 +1,28 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/shaka-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" profiles="urn:mpeg:dash:profile:isoff-live:2011" minBufferTime="PT2S" type="static" mediaPresentationDuration="PT2.7360665798187256S">
  <Period id="0">
    <AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="30000/1001" segmentAlignment="true" par="16:9">
      <Representation id="0" bandwidth="974122" codecs="avc1.64001e" mimeType="video/mp4" sar="1:1">
        <SegmentTemplate timescale="30000" initialization="bear-640x360-video-init.mp4" media="bear-640x360-video-$Time$.m4s" startNumber="1">
          <SegmentTimeline>
            <S t="0" d="30030" r="1"/>
            <S t="60060" d="22022"/>
          </SegmentTimeline>
        </SegmentTemplate>
      </Representation>
    </AdaptationSet>
    <AdaptationSet id="1" contentType="audio" segmentAlignment="true">
      <Representation id="1" bandwidth="133929" codecs="mp4a.40.2" mimeType="audio/mp4" audioSamplingRate="44100">
        <AudioChannelConfiguration schemeIdUri="urn:mpeg:dash:23003:3:audio_channel_configuration:2011" value="2"/>
        <SegmentTemplate timescale="44100" initialization="bear-640x360-audio-init.mp4" media="bear-640x360-audio-$Time$.m4s" startNumber="1">
          <SegmentTimeline>
            <S t="0" d="45056"/>
            <S t="45056" d="44032"/>
            <S t="89088" d="31744"/>
          </SegmentTimeline>
        </SegmentTemplate>
      </Representation>
    </AdaptationSet>
  </Period>
</MPD>
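
The manifest above addresses media segments by $Time$, so the media file
names written by the packager must match the S@t start times in each
SegmentTimeline (0, 30030, 60060 for video; 0, 45056, 89088 for audio). Below
is a minimal sketch of how a client would expand the timeline into segment
URLs; it is simplified and not from any real player (a real DASH client also
handles implicit t values, timescales, and availability windows).

    # Illustrative sketch only; expands <S t d r> entries into $Time$ URLs.
    def expand_timeline(entries):
      times = []
      for t, d, r in entries:
        start = t
        for _ in range(r + 1):  # r repeats mean r + 1 segments of duration d
          times.append(start)
          start += d
      return times

    video_times = expand_timeline([(0, 30030, 1), (60060, 22022, 0)])
    video_urls = ['bear-640x360-video-$Time$.m4s'.replace('$Time$', str(t))
                  for t in video_times]
    # -> ['bear-640x360-video-0.m4s', 'bear-640x360-video-30030.m4s',
    #     'bear-640x360-video-60060.m4s']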


@@ -92,6 +92,12 @@ Status MultiSegmentSegmenter::WriteSegment() {
  DCHECK(fragment_buffer());
  DCHECK(styp_);
  DCHECK(!sidx()->references.empty());

  // earliest_presentation_time is the earliest presentation time of any access
  // unit in the reference stream in the first subsegment.
  sidx()->earliest_presentation_time =
      sidx()->references[0].earliest_presentation_time;
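  // Editorial note (not part of the commit): setting this before the segment
  // file name is generated later in WriteSegment() lets a $Time$-based
  // segment_template resolve to the current segment's start time rather than
  // the previous segment's (issue #472).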

  std::unique_ptr<BufferWriter> buffer(new BufferWriter());
  std::unique_ptr<File, FileCloser> file;
  std::string file_name;
@@ -115,15 +121,8 @@ Status MultiSegmentSegmenter::WriteSegment() {
    styp_->Write(buffer.get());
  }

  if (options().mp4_params.generate_sidx_in_media_segments) {
    DCHECK(sidx());
    DCHECK(!sidx()->references.empty());
    // earliest_presentation_time is the earliest presentation time of any
    // access unit in the reference stream in the first subsegment.
    sidx()->earliest_presentation_time =
        sidx()->references[0].earliest_presentation_time;
  if (options().mp4_params.generate_sidx_in_media_segments)
    sidx()->Write(buffer.get());
  }

  const size_t segment_header_size = buffer->Size();
  const size_t segment_size = segment_header_size + fragment_buffer()->Size();