WebMParser: set duration and dts correctly
- dts was not set earlier; for WebM we can assume dts is the same as pts.
- Calculate block duration as the difference with the next block's timestamp when the duration is not encoded, even if the track default_duration is set.
- Use the track default_duration as the duration estimate for the last block.
- This also removes Opus duration computation from encoded data.

Issues #67, #68

Change-Id: Icaa2769dcb2a89269ae014f44ad6a9262770aed2
This commit is contained in:
parent 7869d509a2
commit 940c3571aa
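For readers skimming the diffs below, here is a minimal sketch (not the parser's exact code; MediaSample is replaced by an illustrative stand-in) of the timestamp/duration policy this commit moves WebMClusterParser::OnBlock to:

```cpp
#include <cstdint>

// Stand-ins for the packager's types; illustrative only.
constexpr int64_t kNoTimestamp = -1;
struct MediaSample {
  int64_t dts = 0, pts = 0, duration = kNoTimestamp;
  void set_dts(int64_t v) { dts = v; }
  void set_pts(int64_t v) { pts = v; }
  void set_duration(int64_t v) { duration = v; }
};

// Condensed from the OnBlock changes in this commit: dts is simply pts, and
// only an explicitly encoded BlockDuration is applied here. Otherwise the
// duration is left unset and filled in later from the next block's timestamp,
// or estimated (track default_duration) for the last block in the track.
void SetTimestamps(MediaSample* sample, int64_t timestamp_us,
                   int64_t block_duration_tc, double timecode_multiplier) {
  sample->set_dts(timestamp_us);  // For WebM, dts is assumed equal to pts.
  sample->set_pts(timestamp_us);
  sample->set_duration(
      block_duration_tc > 0
          ? static_cast<int64_t>(block_duration_tc * timecode_multiplier)
          : kNoTimestamp);
}
```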
Binary file not shown.
@@ -1,15 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/edash-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7060000896453857S">
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7360000610351562S">
  <Period id="0">
    <AdaptationSet id="0" contentType="video" width="320" height="240" frameRate="1000000/33000" subsegmentAlignment="true" par="16:9">
      <Representation id="0" bandwidth="210593" codecs="vp09.00.00.08.00.01.00.00" mimeType="video/mp4" sar="427:320">
    <AdaptationSet id="0" contentType="video" width="320" height="240" frameRate="1000000/34000" subsegmentAlignment="true" par="16:9">
      <Representation id="0" bandwidth="210135" codecs="vp09.00.00.08.00.01.00.00" mimeType="video/mp4" sar="427:320">
        <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
        <ContentProtection schemeIdUri="urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed">
          <cenc:pssh>AAAAMHBzc2gAAAAA7e+LqXnWSs6jyCfc1R0h7QAAABAxMjM0NTY3ODkwMTIzNDU2</cenc:pssh>
        </ContentProtection>
        <BaseURL>output_video.mp4</BaseURL>
        <SegmentBase indexRange="1039-1094" timescale="1000000">
        <SegmentBase indexRange="1039-1106" timescale="1000000">
          <Initialization range="0-1038"/>
        </SegmentBase>
      </Representation>
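A quick sanity check on the golden MPD above: mediaPresentationDuration grows from roughly 2.7060 s to 2.7360 s, about 30 ms. That is consistent with, though not proven by this file alone, the last video block now receiving the track default_duration of one frame (~33-34 ms, matching the frameRate change from 1000000/33000 to 1000000/34000) instead of a shorter estimate, which is the behavior change described in the commit message.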
Binary file not shown.
@@ -1,11 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/edash-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7059998512268066S">
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7360000610351562S">
  <Period id="0">
    <AdaptationSet id="0" contentType="video" width="320" height="240" frameRate="1000000/33000" par="16:9">
      <Representation id="0" bandwidth="205387" codecs="vp9" mimeType="video/webm" sar="427:320">
    <AdaptationSet id="0" contentType="video" width="320" height="240" frameRate="1000000/34000" par="16:9">
      <Representation id="0" bandwidth="203226" codecs="vp9" mimeType="video/webm" sar="427:320">
        <BaseURL>output_video.webm</BaseURL>
        <SegmentBase indexRange="69439-69472" timescale="1000000">
        <SegmentBase indexRange="69455-69503" timescale="1000000">
          <Initialization range="0-286"/>
        </SegmentBase>
      </Representation>
Binary file not shown.
@@ -1,15 +1,15 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/edash-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7060000896453857S">
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7360000610351562S">
  <Period id="0">
    <AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="1000000/33000" subsegmentAlignment="true" par="16:9">
      <Representation id="0" bandwidth="344796" codecs="vp08.00.00.08.01.01.00.00" mimeType="video/mp4" sar="1:1">
      <Representation id="0" bandwidth="342164" codecs="vp08.00.00.08.01.01.00.00" mimeType="video/mp4" sar="1:1">
        <ContentProtection value="cenc" schemeIdUri="urn:mpeg:dash:mp4protection:2011" cenc:default_KID="31323334-3536-3738-3930-313233343536"/>
        <ContentProtection schemeIdUri="urn:uuid:edef8ba9-79d6-4ace-a3c8-27dcd51d21ed">
          <cenc:pssh>AAAAMHBzc2gAAAAA7e+LqXnWSs6jyCfc1R0h7QAAABAxMjM0NTY3ODkwMTIzNDU2</cenc:pssh>
        </ContentProtection>
        <BaseURL>output_video.mp4</BaseURL>
        <SegmentBase indexRange="1007-1062" timescale="1000000">
        <SegmentBase indexRange="1007-1074" timescale="1000000">
          <Initialization range="0-1006"/>
        </SegmentBase>
      </Representation>
Binary file not shown.
@@ -1,11 +1,11 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--Generated with https://github.com/google/edash-packager version <tag>-<hash>-<test>-->
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7059998512268066S">
<MPD xmlns="urn:mpeg:dash:schema:mpd:2011" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xlink="http://www.w3.org/1999/xlink" xsi:schemaLocation="urn:mpeg:dash:schema:mpd:2011 DASH-MPD.xsd" xmlns:cenc="urn:mpeg:cenc:2013" minBufferTime="PT2S" type="static" profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" mediaPresentationDuration="PT2.7360000610351562S">
  <Period id="0">
    <AdaptationSet id="0" contentType="video" width="640" height="360" frameRate="1000000/33000" par="16:9">
      <Representation id="0" bandwidth="338992" codecs="vp8" mimeType="video/webm" sar="1:1">
      <Representation id="0" bandwidth="335366" codecs="vp8" mimeType="video/webm" sar="1:1">
        <BaseURL>output_video.webm</BaseURL>
        <SegmentBase indexRange="114630-114664" timescale="1000000">
        <SegmentBase indexRange="114646-114695" timescale="1000000">
          <Initialization range="0-288"/>
        </SegmentBase>
      </Representation>
@@ -1,91 +0,0 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "packager/base/logging.h"
#include "packager/media/formats/webm/opus_packet_builder.h"
#include "packager/media/formats/webm/webm_cluster_parser.h"

namespace edash_packager {
namespace media {

OpusPacket::OpusPacket(uint8_t config, uint8_t frame_count, bool is_VBR) {
  DCHECK_GE(config, 0);
  DCHECK_LT(config, kNumPossibleOpusConfigs);
  DCHECK_GE(frame_count, kMinOpusPacketFrameCount);
  DCHECK_LE(frame_count, kMaxOpusPacketFrameCount);

  duration_ms_ = frame_count *
                 WebMClusterParser::kOpusFrameDurationsMu[config] /
                 static_cast<float>(1000);

  uint8_t frame_count_code;
  uint8_t frame_count_byte;

  if (frame_count == 1) {
    frame_count_code = 0;
  } else if (frame_count == 2) {
    frame_count_code = is_VBR ? 2 : 1;
  } else {
    frame_count_code = 3;
    frame_count_byte = (is_VBR ? 1 << 7 : 0) | frame_count;
  }

  // All opus packets must have TOC byte.
  uint8_t opus_toc_byte = (config << 3) | frame_count_code;
  data_.push_back(opus_toc_byte);

  // For code 3 packets, the number of frames is signaled in the "frame
  // count byte".
  if (frame_count_code == 3) {
    data_.push_back(frame_count_byte);
  }

  // Packet will only conform to layout specification for the TOC byte
  // and optional frame count bytes appended above. This last byte
  // is purely dummy padding where frame size data or encoded data might
  // otherwise start.
  data_.push_back(static_cast<uint8_t>(0));
}

OpusPacket::~OpusPacket() {
}

const uint8_t* OpusPacket::data() const {
  return &(data_[0]);
}

int OpusPacket::size() const {
  return data_.size();
}

double OpusPacket::duration_ms() const {
  return duration_ms_;
}

ScopedVector<OpusPacket> BuildAllOpusPackets() {
  ScopedVector<OpusPacket> opus_packets;

  for (int frame_count = kMinOpusPacketFrameCount;
       frame_count <= kMaxOpusPacketFrameCount; frame_count++) {
    for (int opus_config_num = 0; opus_config_num < kNumPossibleOpusConfigs;
         opus_config_num++) {
      bool is_VBR = false;
      opus_packets.push_back(
          new OpusPacket(opus_config_num, frame_count, is_VBR));

      if (frame_count >= 2) {
        // Add another packet with VBR flag toggled. For frame counts >= 2,
        // VBR triggers changes to packet framing.
        is_VBR = true;
        opus_packets.push_back(
            new OpusPacket(opus_config_num, frame_count, is_VBR));
      }
    }
  }

  return opus_packets.Pass();
}

}  // namespace media
}  // namespace edash_packager
@@ -1,45 +0,0 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
#define MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_

#include <vector>

#include "packager/base/memory/scoped_ptr.h"
#include "packager/base/memory/scoped_vector.h"

namespace edash_packager {
namespace media {

// From Opus RFC. See https://tools.ietf.org/html/rfc6716#page-14
enum OpusConstants {
  kNumPossibleOpusConfigs = 32,
  kMinOpusPacketFrameCount = 1,
  kMaxOpusPacketFrameCount = 48
};

class OpusPacket {
 public:
  OpusPacket(uint8_t config, uint8_t frame_count, bool is_VBR);
  ~OpusPacket();

  const uint8_t* data() const;
  int size() const;
  double duration_ms() const;

 private:
  std::vector<uint8_t> data_;
  double duration_ms_;

  DISALLOW_COPY_AND_ASSIGN(OpusPacket);
};

// Builds an exhaustive collection of Opus packet configurations.
ScopedVector<OpusPacket> BuildAllOpusPackets();

}  // namespace media
}  // namespace edash_packager

#endif  // MEDIA_FORMATS_WEBM_OPUS_PACKET_BUILDER_H_
@@ -69,8 +69,6 @@
        'cluster_builder.h',
        'encrypted_segmenter_unittest.cc',
        'multi_segment_segmenter_unittest.cc',
        'opus_packet_builder.cc',
        'opus_packet_builder.h',
        'segmenter_test_base.cc',
        'segmenter_test_base.h',
        'single_segment_segmenter_unittest.cc',
@@ -17,36 +17,12 @@
#include "packager/media/formats/webm/webm_crypto_helpers.h"
#include "packager/media/formats/webm/webm_webvtt_parser.h"

// Logs only while |count| < |max|, increments |count| for each log, and warns
// in the log if |count| has just reached |max|.
#define LIMITED_LOG(level, count, max) \
  LOG_IF(level, (count) < (max)) \
      << (((count) + 1 == (max)) \
              ? "(Log limit reached. Further similar entries " \
                "may be suppressed): " \
              : "")
#define LIMITED_DLOG(level, count, max) \
  DLOG_IF(level, (count) < (max)) \
      << (((count) + 1 == (max)) \
              ? "(Log limit reached. Further similar entries " \
                "may be suppressed): " \
              : "")

namespace edash_packager {
namespace media {
namespace {

const int64_t kMicrosecondsPerMillisecond = 1000;

enum {
  // Limits the number of LOG() calls in the path of reading encoded
  // duration to avoid spamming for corrupted data.
  kMaxDurationErrorLogs = 10,
  // Limits the number of LOG() calls warning the user that buffer
  // durations have been estimated.
  kMaxDurationEstimateLogs = 10,
};

// Helper function used to inspect block data to determine if the
// block is a keyframe.
// |data| contains the bytes in the block.
@@ -73,11 +49,6 @@ bool IsKeyframe(bool is_video,

}  // namespace

const uint16_t WebMClusterParser::kOpusFrameDurationsMu[] = {
    10000, 20000, 40000, 60000, 10000, 20000, 40000, 60000, 10000, 20000, 40000,
    60000, 10000, 20000, 10000, 20000, 2500, 5000, 10000, 20000, 2500, 5000,
    10000, 20000, 2500, 5000, 10000, 20000, 2500, 5000, 10000, 20000};

WebMClusterParser::WebMClusterParser(
    int64_t timecode_scale,
    scoped_refptr<AudioStreamInfo> audio_stream_info,
@@ -169,100 +140,6 @@ int WebMClusterParser::Parse(const uint8_t* buf, int size) {
  return result;
}

int64_t WebMClusterParser::TryGetEncodedAudioDuration(
    const uint8_t* data,
    int size) {

  // Duration is currently read assuming the *entire* stream is unencrypted.
  // The special "Signal Byte" prepended to Blocks in encrypted streams is
  // assumed to not be present.
  // TODO: Consider parsing "Signal Byte" for encrypted streams to return
  // duration for any unencrypted blocks.

  DCHECK(audio_stream_info_);
  if (audio_stream_info_->codec() == kCodecOpus) {
    return ReadOpusDuration(data, size);
  }

  // TODO: Implement duration reading for Vorbis. See motivations in
  // http://crbug.com/396634.

  return kNoTimestamp;
}

int64_t WebMClusterParser::ReadOpusDuration(const uint8_t* data, int size) {
  // Masks and constants for Opus packets. See
  // https://tools.ietf.org/html/rfc6716#page-14
  static const uint8_t kTocConfigMask = 0xf8;
  static const uint8_t kTocFrameCountCodeMask = 0x03;
  static const uint8_t kFrameCountMask = 0x3f;
  static const int64_t kPacketDurationMaxMs = 120000;

  if (size < 1) {
    LIMITED_DLOG(INFO, num_duration_errors_, kMaxDurationErrorLogs)
        << "Invalid zero-byte Opus packet; demuxed block duration may be "
           "imprecise.";
    return kNoTimestamp;
  }

  // Frame count type described by last 2 bits of Opus TOC byte.
  int frame_count_type = data[0] & kTocFrameCountCodeMask;

  int frame_count = 0;
  switch (frame_count_type) {
    case 0:
      frame_count = 1;
      break;
    case 1:
    case 2:
      frame_count = 2;
      break;
    case 3:
      // Type 3 indicates an arbitrary frame count described in the next byte.
      if (size < 2) {
        LIMITED_DLOG(INFO, num_duration_errors_, kMaxDurationErrorLogs)
            << "Second byte missing from 'Code 3' Opus packet; demuxed block "
               "duration may be imprecise.";
        return kNoTimestamp;
      }

      frame_count = data[1] & kFrameCountMask;

      if (frame_count == 0) {
        LIMITED_DLOG(INFO, num_duration_errors_, kMaxDurationErrorLogs)
            << "Illegal 'Code 3' Opus packet with frame count zero; demuxed "
               "block duration may be imprecise.";
        return kNoTimestamp;
      }

      break;
    default:
      LIMITED_DLOG(INFO, num_duration_errors_, kMaxDurationErrorLogs)
          << "Unexpected Opus frame count type: " << frame_count_type << "; "
          << "demuxed block duration may be imprecise.";
      return kNoTimestamp;
  }

  int opusConfig = (data[0] & kTocConfigMask) >> 3;
  CHECK_GE(opusConfig, 0);
  CHECK_LT(opusConfig, static_cast<int>(arraysize(kOpusFrameDurationsMu)));

  DCHECK_GT(frame_count, 0);
  int64_t duration = kOpusFrameDurationsMu[opusConfig] * frame_count;

  if (duration > kPacketDurationMaxMs * 1000) {
    // Intentionally allowing packet to pass through for now. Decoder should
    // either handle or fail gracefully. LOG as breadcrumbs in case
    // things go sideways.
    LIMITED_DLOG(INFO, num_duration_errors_, kMaxDurationErrorLogs)
        << "Warning, demuxed Opus packet with encoded duration: "
        << duration / 1000 << "ms. Should be no greater than "
        << kPacketDurationMaxMs << "ms.";
  }

  return duration;
}

WebMParserClient* WebMClusterParser::OnListStart(int id) {
  if (id == kWebMIdCluster) {
    cluster_timecode_ = -1;
@@ -451,13 +328,9 @@ bool WebMClusterParser::OnBlock(bool is_simple_block,
  Track* track = NULL;
  StreamType stream_type = kStreamAudio;
  std::string encryption_key_id;
  int64_t encoded_duration = kNoTimestamp;
  if (track_num == audio_.track_num()) {
    track = &audio_;
    encryption_key_id = audio_encryption_key_id_;
    if (encryption_key_id.empty()) {
      encoded_duration = TryGetEncodedAudioDuration(data, size);
    }
  } else if (track_num == video_.track_num()) {
    track = &video_;
    encryption_key_id = video_encryption_key_id_;
@@ -529,49 +402,13 @@ bool WebMClusterParser::OnBlock(bool is_simple_block,
        &side_data[0], side_data.size(), true);
  }

  buffer->set_dts(timestamp);
  buffer->set_pts(timestamp);
  if (cluster_start_time_ == kNoTimestamp)
    cluster_start_time_ = timestamp;

  int64_t block_duration_time_delta = kNoTimestamp;
  if (block_duration >= 0) {
    block_duration_time_delta = block_duration * timecode_multiplier_;
  }

  // Prefer encoded duration over BlockGroup->BlockDuration or
  // TrackEntry->DefaultDuration when available. This layering violation is a
  // workaround for http://crbug.com/396634, decreasing the likelihood of
  // fall-back to rough estimation techniques for Blocks that lack a
  // BlockDuration at the end of a cluster. Duration estimation may still apply
  // in cases of encryption and codecs for which we do not extract encoded
  // duration. Estimates are applied as Block Timecode deltas, or once the whole
  // stream is parsed in the case of the last Block in the stream. See
  // Track::EmitBuffer and ApplyDurationEstimateIfNeeded().
  if (encoded_duration != kNoTimestamp) {
    DCHECK(encoded_duration != kInfiniteDuration);
    DCHECK(encoded_duration > 0);
    buffer->set_duration(encoded_duration);

    DVLOG(3) << __FUNCTION__ << " : "
             << "Using encoded duration " << encoded_duration;

    if (block_duration_time_delta != kNoTimestamp) {
      int64_t duration_difference =
          block_duration_time_delta - encoded_duration;

      const auto kWarnDurationDiff = timecode_multiplier_ * 2;
      if (duration_difference > kWarnDurationDiff) {
        LIMITED_DLOG(INFO, num_duration_errors_, kMaxDurationErrorLogs)
            << "BlockDuration (" << block_duration_time_delta / 1000
            << "ms) differs significantly from encoded duration ("
            << encoded_duration / 1000 << "ms).";
      }
    }
  } else if (block_duration_time_delta != kNoTimestamp) {
    buffer->set_duration(block_duration_time_delta);
  } else {
    buffer->set_duration(track->default_duration());
  }
  buffer->set_duration(block_duration > 0
                           ? (block_duration * timecode_multiplier_)
                           : kNoTimestamp);

  if (!init_cb_.is_null() && !initialized_) {
    std::vector<scoped_refptr<StreamInfo>> streams;
@@ -679,16 +516,14 @@ void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
  int64_t estimated_duration = GetDurationEstimate();
  last_added_buffer_missing_duration_->set_duration(estimated_duration);

  LIMITED_LOG(INFO, num_duration_estimates_, kMaxDurationEstimateLogs)
      << "Estimating WebM block duration to be " << estimated_duration / 1000
      << "ms for the last (Simple)Block in the Cluster for this Track. Use "
         "BlockGroups with BlockDurations at the end of each Track in a "
         "Cluster to avoid estimation.";
  VLOG(1) << "Track " << track_num_ << ": Estimating WebM block duration to be "
          << estimated_duration / 1000
          << "ms for the last (Simple)Block in the Cluster for this Track. Use "
             "BlockGroups with BlockDurations at the end of each Track in a "
             "Cluster to avoid estimation.";

  DVLOG(2) << __FUNCTION__ << " new dur : ts "
           << last_added_buffer_missing_duration_->pts()
           << " dur "
           << last_added_buffer_missing_duration_->duration()
  DVLOG(2) << " new dur : ts " << last_added_buffer_missing_duration_->pts()
           << " dur " << last_added_buffer_missing_duration_->duration()
           << " kf " << last_added_buffer_missing_duration_->is_key_frame()
           << " size " << last_added_buffer_missing_duration_->data_size();
@@ -702,7 +537,6 @@ void WebMClusterParser::Track::Reset() {
  last_added_buffer_missing_duration_ = NULL;
}

bool WebMClusterParser::Track::EmitBufferHelp(
    const scoped_refptr<MediaSample>& buffer) {
  DCHECK(!last_added_buffer_missing_duration_.get());
@@ -739,20 +573,25 @@ bool WebMClusterParser::Track::EmitBufferHelp(
}

int64_t WebMClusterParser::Track::GetDurationEstimate() {
  int64_t duration = estimated_next_frame_duration_;
  if (duration != kNoTimestamp) {
    DVLOG(3) << __FUNCTION__ << " : using estimated duration";
  int64_t duration = kNoTimestamp;
  if (default_duration_ != kNoTimestamp) {
    duration = default_duration_;
    DVLOG(3) << __FUNCTION__ << " : using track default duration " << duration;
  } else if (estimated_next_frame_duration_ != kNoTimestamp) {
    duration = estimated_next_frame_duration_;
    DVLOG(3) << __FUNCTION__ << " : using estimated duration " << duration;
  } else {
    DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration";
    if (is_video_) {
      duration = kDefaultVideoBufferDurationInMs * kMicrosecondsPerMillisecond;
    } else {
      duration = kDefaultAudioBufferDurationInMs * kMicrosecondsPerMillisecond;
    }
    DVLOG(3) << __FUNCTION__ << " : using hardcoded default duration "
             << duration;
  }

  DCHECK(duration > 0);
  DCHECK(duration != kNoTimestamp);
  DCHECK_GT(duration, 0);
  DCHECK_NE(duration, kNoTimestamp);
  return duration;
}
@@ -32,12 +32,6 @@ class WebMClusterParser : public WebMParserClient {
    kDefaultVideoBufferDurationInMs = 63
  };

  /// Opus packets encode the duration and other parameters in the 5 most
  /// significant bits of the first byte. The index in this array corresponds
  /// to the duration of each frame of the packet in microseconds. See
  /// https://tools.ietf.org/html/rfc6716#page-14
  static const uint16_t kOpusFrameDurationsMu[];

 private:
  // Helper class that manages per-track state.
  class Track {
@@ -56,19 +50,16 @@ class WebMClusterParser : public WebMParserClient {
    // duration, saves |buffer| into |last_added_buffer_missing_duration_|.
    bool EmitBuffer(const scoped_refptr<MediaSample>& buffer);

    // If |last_added_buffer_missing_duration_| is set, updates its duration to
    // be non-kNoTimestamp value of |estimated_next_frame_duration_| or a
    // hard-coded default, then emits it and unsets
    // |last_added_buffer_missing_duration_|. (This method helps stream parser
    // emit all buffers in a media segment before signaling end of segment.)
    // If |last_added_buffer_missing_duration_| is set, estimate the duration
    // for this buffer using helper function GetDurationEstimate() then emits it
    // and unsets |last_added_buffer_missing_duration_| (This method helps
    // stream parser emit all buffers in a media segment).
    void ApplyDurationEstimateIfNeeded();

    // Clears all buffer state, including any possibly held-aside buffer that
    // was missing duration.
    void Reset();

    int64_t default_duration() const { return default_duration_; }

   private:
    // Helper that sanity-checks |buffer| duration, updates
    // |estimated_next_frame_duration_|, and emits |buffer|.
@@ -76,28 +67,24 @@ class WebMClusterParser : public WebMParserClient {
    // emitted. Returns true otherwise.
    bool EmitBufferHelp(const scoped_refptr<MediaSample>& buffer);

    // Helper that calculates the buffer duration to use in
    // Helper function that calculates the buffer duration to use in
    // ApplyDurationEstimateIfNeeded().
    int64_t GetDurationEstimate();

    // Counts the number of estimated durations used in this track. Used to
    // prevent log spam for LOG()s about estimated duration.
    int num_duration_estimates_ = 0;

    int track_num_;
    bool is_video_;

    // Parsed track buffers, each with duration and in (decode) timestamp order,
    // that have not yet been emitted. Note that up to one additional buffer
    // missing duration may be tracked by |last_added_buffer_missing_duration_|.
    // Holding the sample that is missing duration. The duration will be
    // computed from the difference in timestamp when next sample arrives; or
    // estimated if it is the last sample in this track.
    scoped_refptr<MediaSample> last_added_buffer_missing_duration_;

    // If kNoTimestamp, then |estimated_next_frame_duration_| will be used.
    int64_t default_duration_;

    // If kNoTimestamp, then a default value will be used. This estimate is the
    // maximum duration seen so far for this track, and is used only if
    // |default_duration_| is kNoTimestamp.
    // If kNoTimestamp, then a hardcoded default value will be used. This
    // estimate is the maximum duration seen so far for this track, and is used
    // only if |default_duration_| is kNoTimestamp.
    int64_t estimated_next_frame_duration_;

    MediaParser::NewSampleCB new_sample_cb_;
@@ -169,22 +156,9 @@ class WebMClusterParser : public WebMParserClient {
  // if that track num is not a text track.
  Track* FindTextTrack(int track_num);

  // Attempts to read the duration from the encoded audio data, returning as
  // kNoTimestamp if duration cannot be retrieved.
  // Avoid calling if encrypted; may produce unexpected output. See
  // implementation for supported codecs.
  int64_t TryGetEncodedAudioDuration(const uint8_t* data, int size);
  // Multiplier used to convert timecodes into microseconds.
  double timecode_multiplier_;

  // Reads Opus packet header to determine packet duration. Duration returned
  // as kNoTimestamp upon failure to read duration from packet.
  int64_t ReadOpusDuration(const uint8_t* data, int size);

  // Tracks the number of LOGs made in process of reading encoded duration.
  // Useful to prevent log spam.
  int num_duration_errors_ = 0;

  double timecode_multiplier_;  // Multiplier used to convert timecodes into
                                // microseconds.
  scoped_refptr<AudioStreamInfo> audio_stream_info_;
  scoped_refptr<VideoStreamInfo> video_stream_info_;
  std::set<int64_t> ignored_tracks_;
@@ -221,7 +195,7 @@ class WebMClusterParser : public WebMParserClient {
  Track video_;
  TextTrackMap text_track_map_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(WebMClusterParser);
  DISALLOW_COPY_AND_ASSIGN(WebMClusterParser);
};

}  // namespace media
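To make the Track bookkeeping described in the header comments above concrete, here is a small standalone sketch (hypothetical names, not the parser's API) of how a held-back sample gets its duration when the next sample arrives, and how the last sample falls back to the default/estimated duration on flush:

```cpp
#include <cstdint>
#include <cstdio>

constexpr int64_t kNoTimestamp = -1;

// Hypothetical per-track state mirroring last_added_buffer_missing_duration_,
// default_duration_ and estimated_next_frame_duration_.
struct TrackState {
  int64_t held_pts = kNoTimestamp;            // sample waiting for a duration
  int64_t default_duration = kNoTimestamp;    // from TrackEntry->DefaultDuration
  int64_t estimated_duration = kNoTimestamp;  // max duration seen so far
};

// Called for every new sample: the previously held sample gets the timestamp
// difference as its duration; the new sample is held until the next one.
void OnSample(TrackState* t, int64_t pts) {
  if (t->held_pts != kNoTimestamp) {
    int64_t duration = pts - t->held_pts;
    if (duration > t->estimated_duration) t->estimated_duration = duration;
    std::printf("emit pts=%lld dur=%lld\n", (long long)t->held_pts,
                (long long)duration);
  }
  t->held_pts = pts;
}

// Called on flush: the last sample uses the track default duration when
// present, otherwise the running estimate.
void Flush(TrackState* t) {
  if (t->held_pts == kNoTimestamp) return;
  int64_t duration = t->default_duration != kNoTimestamp
                         ? t->default_duration
                         : t->estimated_duration;
  std::printf("emit pts=%lld dur=%lld (estimated)\n", (long long)t->held_pts,
              (long long)duration);
  t->held_pts = kNoTimestamp;
}
```

Feeding audio samples at 0, 23 and 36 ms with a 23 ms default duration would emit durations of 23 ms, 13 ms and then the 23 ms default for the last sample, which matches the expectations in the updated unit tests below.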
@@ -18,7 +18,6 @@
#include "packager/media/base/decrypt_config.h"
#include "packager/media/base/timestamp.h"
#include "packager/media/formats/webm/cluster_builder.h"
#include "packager/media/formats/webm/opus_packet_builder.h"
#include "packager/media/formats/webm/webm_constants.h"

using ::testing::HasSubstr;
@@ -475,15 +474,16 @@ TEST_F(WebMClusterParserTest, TracksWithSampleMissingDuration) {
  const int kExpectedVideoEstimationInMs = 33;

  const BlockInfo kBlockInfo[] = {
      {kVideoTrackNum, 0, 33, true, NULL, 0},
      // Note that for simple blocks, duration is not encoded.
      {kVideoTrackNum, 0, 0, true, NULL, 0},
      {kAudioTrackNum, 0, 23, false, NULL, 0},
      {kTextTrackNum, 10, 42, false, NULL, 0},
      {kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 33, 33, true, NULL, 0},
      {kAudioTrackNum, 36, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 66, kExpectedVideoEstimationInMs, true, NULL, 0},
      {kAudioTrackNum, 70, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kAudioTrackNum, 83, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kAudioTrackNum, 23, 0, true, NULL, 0},
      {kVideoTrackNum, 33, 0, true, NULL, 0},
      {kAudioTrackNum, 36, 0, true, NULL, 0},
      {kVideoTrackNum, 66, 0, true, NULL, 0},
      {kAudioTrackNum, 70, 0, true, NULL, 0},
      {kAudioTrackNum, 83, 0, true, NULL, 0},
  };

  // Samples are not emitted in the same order as |kBlockInfo| due to missing of
@@ -491,24 +491,24 @@ TEST_F(WebMClusterParserTest, TracksWithSampleMissingDuration) {
  const BlockInfo kExpectedBlockInfo[] = {
      {kAudioTrackNum, 0, 23, false, NULL, 0},
      {kTextTrackNum, 10, 42, false, NULL, 0},
      {kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 0, 33, true, NULL, 0},
      {kAudioTrackNum, 36, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kAudioTrackNum, 23, 13, true, NULL, 0},
      {kVideoTrackNum, 33, 33, true, NULL, 0},
      {kAudioTrackNum, 70, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kAudioTrackNum, 36, 34, true, NULL, 0},
      {kAudioTrackNum, 70, 13, true, NULL, 0},
      {kAudioTrackNum, 83, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 66, kExpectedVideoEstimationInMs, true, NULL, 0},
  };
  const int kExpectedBuffersOnPartialCluster[] = {
      0,  // Video simple block without DefaultDuration should be held back
      1,  // Audio buffer ready
      0,  // Video simple block without duration should be held back
      1,  // 1st audio buffer ready
      2,  // Text buffer ready
      3,  // 2nd audio buffer ready
      4,  // 1st video emitted, 2nd video held back with no duration
      5,  // 3rd audio ready
      6,  // 2nd video emitted, 3rd video held back with no duration
      7,  // 4th audio ready
      8,  // 5th audio ready
      2,  // Audio simple block without duration should be held back
      3,  // 1st video emitted, 2nd video held back with no duration
      4,  // 2nd audio ready, 3rd audio held back with no duration
      5,  // 2nd video emitted, 3rd video held back with no duration
      6,  // 3rd audio ready, 4th audio held back with no duration
      7,  // 4th audio ready, 5th audio held back with no duration
  };

  ASSERT_EQ(arraysize(kBlockInfo), arraysize(kExpectedBuffersOnPartialCluster));
@@ -531,9 +531,11 @@ TEST_F(WebMClusterParserTest, TracksWithSampleMissingDuration) {
        VerifyBuffers(kExpectedBlockInfo, kExpectedBuffersOnPartialCluster[i]));
  }

  // The last (3rd) video is emitted on flush with duration estimated.
  // The last audio (5th) and the last video (3rd) are emitted on flush with
  // duration estimated - estimated to be default duration if it is specified,
  // otherwise estimated from earlier frames.
  parser_->Flush();
  EXPECT_TRUE(VerifyBuffers(&kExpectedBlockInfo[block_count - 1], 1));
  EXPECT_TRUE(VerifyBuffers(&kExpectedBlockInfo[block_count - 2], 2));
}

TEST_F(WebMClusterParserTest, Reset) {
@@ -875,11 +877,12 @@ TEST_F(WebMClusterParserTest, ParseWithDefaultDurationsSimpleBlocks) {
  EXPECT_LT(kTestVideoFrameDefaultDurationInMs, 33);

  const BlockInfo kBlockInfo[] = {
      {kAudioTrackNum, 0, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kAudioTrackNum, 23, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 33, kTestVideoFrameDefaultDurationInMs, true, NULL, 0},
      {kAudioTrackNum, 46, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 67, kTestVideoFrameDefaultDurationInMs, true, NULL, 0},
      // Note that for simple blocks, duration is not encoded.
      {kAudioTrackNum, 0, 23, true, NULL, 0},
      {kAudioTrackNum, 23, 23, true, NULL, 0},
      {kVideoTrackNum, 33, 34, true, NULL, 0},
      {kAudioTrackNum, 46, 23, true, NULL, 0},
      {kVideoTrackNum, 67, 33, true, NULL, 0},
      {kAudioTrackNum, 69, kTestAudioFrameDefaultDurationInMs, true, NULL, 0},
      {kVideoTrackNum, 100, kTestVideoFrameDefaultDurationInMs, true, NULL, 0},
  };
@@ -887,21 +890,17 @@ TEST_F(WebMClusterParserTest, ParseWithDefaultDurationsSimpleBlocks) {
  int block_count = arraysize(kBlockInfo);
  scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));

  // Send slightly less than the full cluster so all but the last block is
  // parsed. Though all the blocks are simple blocks, none should be held aside
  // for duration estimation prior to end of cluster detection because all the
  // tracks have DefaultDurations.
  int result = parser_->Parse(cluster->data(), cluster->size() - 1);
  EXPECT_GT(result, 0);
  EXPECT_LT(result, cluster->size());
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count - 1));

  parser_->Reset();

  // Now parse a whole cluster to verify that all the blocks will get parsed.
  result = parser_->Parse(cluster->data(), cluster->size());
  // Now parse a whole cluster to verify that all the blocks will get parsed
  // and the last audio and video are held back due to no duration.
  // The durations for all blocks are calculated to be the timestamp difference
  // with the next block.
  int result = parser_->Parse(cluster->data(), cluster->size());
  EXPECT_EQ(cluster->size(), result);
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count - 2));
  // The last audio and video are emitted on flush with duration estimated -
  // estimated to be default_duration since it is present.
  parser_->Flush();
  ASSERT_TRUE(VerifyBuffers(&kBlockInfo[block_count - 2], 2));
}

TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
@@ -1008,49 +1007,6 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
  ASSERT_TRUE(VerifyBuffers(kBlockInfo2, block_count2));
}

// TODO: Is parser behavior correct? See http://crbug.com/363433.
TEST_F(WebMClusterParserTest,
       ParseWithDefaultDurationsBlockGroupsWithoutDurations) {
  InSequence s;
  ResetParserToHaveDefaultDurations();

  EXPECT_LT(kTestAudioFrameDefaultDurationInMs, 23);
  EXPECT_LT(kTestVideoFrameDefaultDurationInMs, 33);

  const BlockInfo kBlockInfo[] = {
      {kAudioTrackNum, 0, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
      {kAudioTrackNum, 23, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
      {kVideoTrackNum, 33, -kTestVideoFrameDefaultDurationInMs, false, NULL, 0},
      {kAudioTrackNum, 46, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
      {kVideoTrackNum, 67, -kTestVideoFrameDefaultDurationInMs, false, NULL, 0},
      {kAudioTrackNum, 69, -kTestAudioFrameDefaultDurationInMs, false, NULL, 0},
      {kVideoTrackNum,
       100,
       -kTestVideoFrameDefaultDurationInMs,
       false,
       NULL,
       0},
  };

  int block_count = arraysize(kBlockInfo);
  scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));

  // Send slightly less than the full cluster so all but the last block is
  // parsed. None should be held aside for duration estimation prior to end of
  // cluster detection because all the tracks have DefaultDurations.
  int result = parser_->Parse(cluster->data(), cluster->size() - 1);
  EXPECT_GT(result, 0);
  EXPECT_LT(result, cluster->size());
  parser_->Flush();
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count - 1));

  // Now parse a whole cluster to verify that all the blocks will get parsed.
  result = parser_->Parse(cluster->data(), cluster->size());
  EXPECT_EQ(cluster->size(), result);
  parser_->Flush();
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));
}

TEST_F(WebMClusterParserTest,
       ParseDegenerateClusterYieldsHardcodedEstimatedDurations) {
  const BlockInfo kBlockInfo[] = {
@@ -1092,99 +1048,5 @@ TEST_F(WebMClusterParserTest,
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));
}

TEST_F(WebMClusterParserTest, ReadOpusDurationsSimpleBlockAtEndOfCluster) {
  int loop_count = 0;
  for (const auto* packet_ptr : BuildAllOpusPackets()) {
    InSequence s;

    // Get a new parser each iteration to prevent exceeding the log cap.
    parser_.reset(CreateParserWithKeyIdsAndAudioCodec(
        std::string(), std::string(), kCodecOpus));

    const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
                                     0,
                                     packet_ptr->duration_ms(),
                                     true,  // Make it a SimpleBlock.
                                     packet_ptr->data(),
                                     packet_ptr->size()}};

    int block_count = arraysize(kBlockInfo);
    scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));

    int result = parser_->Parse(cluster->data(), cluster->size());
    EXPECT_EQ(cluster->size(), result);
    ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));

    loop_count++;
  }

  // Test should minimally cover all the combinations of config and frame count.
  ASSERT_GE(loop_count, kNumPossibleOpusConfigs * kMaxOpusPacketFrameCount);
}

TEST_F(WebMClusterParserTest, PreferOpusDurationsOverBlockDurations) {
  int loop_count = 0;
  for (const auto* packet_ptr : BuildAllOpusPackets()) {
    InSequence s;

    // Get a new parser each iteration to prevent exceeding the log cap.
    parser_.reset(CreateParserWithKeyIdsAndAudioCodec(
        std::string(), std::string(), kCodecOpus));

    // Setting BlockDuration != Opus duration to see which one the parser uses.
    int block_duration_ms = packet_ptr->duration_ms() + 10;
    BlockInfo block_infos[] = {{kAudioTrackNum,
                                0,
                                static_cast<double>(block_duration_ms),
                                false,  // Not a SimpleBlock.
                                packet_ptr->data(),
                                packet_ptr->size()}};

    int block_count = arraysize(block_infos);
    scoped_ptr<Cluster> cluster(CreateCluster(0, block_infos, block_count));
    int result = parser_->Parse(cluster->data(), cluster->size());
    EXPECT_EQ(cluster->size(), result);

    // BlockInfo duration will be used to verify buffer duration, so changing
    // duration to be that of the Opus packet to verify it was preferred.
    block_infos[0].duration = packet_ptr->duration_ms();

    ASSERT_TRUE(VerifyBuffers(block_infos, block_count));

    loop_count++;
  }

  // Test should minimally cover all the combinations of config and frame count.
  ASSERT_GE(loop_count, kNumPossibleOpusConfigs * kMaxOpusPacketFrameCount);
}

// Tests that BlockDuration is used to set duration on buffer rather than
// encoded duration in Opus packet (or hard coded duration estimates). Encoded
// Opus duration is usually preferred but cannot be known when encrypted.
TEST_F(WebMClusterParserTest, DontReadEncodedDurationWhenEncrypted) {
  // Non-empty dummy value signals encryption is active for audio.
  std::string audio_encryption_id("audio_key_id");

  // Reset parser to expect Opus codec audio and use audio encryption key id.
  parser_.reset(CreateParserWithKeyIdsAndAudioCodec(audio_encryption_id,
                                                    std::string(), kCodecOpus));

  // Single Block with BlockDuration and encrypted data.
  const BlockInfo kBlockInfo[] = {{kAudioTrackNum,
                                   0,
                                   kTestAudioFrameDefaultDurationInMs,
                                   false,            // Not a SimpleBlock
                                   kEncryptedFrame,  // Encrypted frame data
                                   arraysize(kEncryptedFrame)}};

  int block_count = arraysize(kBlockInfo);
  scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
  int result = parser_->Parse(cluster->data(), cluster->size());
  EXPECT_EQ(cluster->size(), result);

  // Will verify that duration of buffer matches that of BlockDuration.
  ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));
}

}  // namespace media
}  // namespace edash_packager