Handle possible NewSampleEvent before ParserInitEvent

For WebM content, it is possible to receive a NewSampleEvent before
the ParserInitEvent. This is because the init event is fired only
after a video block has been analyzed, which could come after an
audio block.

Also modified the Flush() function to return a bool indicating whether
the flush was successful and the samples were handled correctly.

Also added the WARN_UNUSED_RESULT macro to the Flush() and Parse()
declarations to ensure their results are handled.
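
The macro makes ignoring the return value a compile-time warning. A minimal
standalone sketch of the mechanism (the macro definition below is
illustrative; the project pulls it in via packager/base/compiler_specific.h):

#if defined(__GNUC__) || defined(__clang__)
#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
#else
#define WARN_UNUSED_RESULT
#endif

bool Flush() WARN_UNUSED_RESULT;  // declaration mirrors the MediaParser API
bool Flush() { return true; }     // stub body so the sketch is self-contained

int main() {
  Flush();       // warning: ignoring return value (-Wunused-result)
  if (!Flush())  // OK: the result is handled
    return 1;
  return 0;
}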

Fixes #71

Change-Id: I2294d6f529f54e4578344916559bb1bc116c745a
Author: KongQun Yang
Date: 2016-01-21 10:30:29 -08:00
Commit: 2c3aed4842 (parent cd74066bb4)
19 changed files with 150 additions and 74 deletions


@ -22,9 +22,13 @@
#include "packager/media/formats/wvm/wvm_media_parser.h"
namespace {
const size_t kInitBufSize = 0x10000; // 65KB, sufficient to determine the
// container and likely all init data.
// 65KB, sufficient to determine the container and likely all init data.
const size_t kInitBufSize = 0x10000;
const size_t kBufSize = 0x200000; // 2MB
// Maximum number of allowed queued samples. If we are receiving a lot of
// samples before seeing init_event, something is not right. The number
// set here is arbitrary though.
const size_t kQueuedSamplesLimit = 10000;
}
namespace edash_packager {
@ -126,14 +130,40 @@ void Demuxer::ParserInitEvent(
}
}
Demuxer::QueuedSample::QueuedSample(uint32_t local_track_id,
scoped_refptr<MediaSample> local_sample)
: track_id(local_track_id), sample(local_sample) {}
Demuxer::QueuedSample::~QueuedSample() {}
bool Demuxer::NewSampleEvent(uint32_t track_id,
const scoped_refptr<MediaSample>& sample) {
if (!init_event_received_) {
if (queued_samples_.size() >= kQueuedSamplesLimit) {
LOG(ERROR) << "Queued samples limit reached: " << kQueuedSamplesLimit;
return false;
}
queued_samples_.push_back(QueuedSample(track_id, sample));
return true;
}
while (!queued_samples_.empty()) {
if (!PushSample(queued_samples_.front().track_id,
queued_samples_.front().sample)) {
return false;
}
queued_samples_.pop_front();
}
return PushSample(track_id, sample);
}
bool Demuxer::PushSample(uint32_t track_id,
const scoped_refptr<MediaSample>& sample) {
std::vector<MediaStream*>::iterator it = streams_.begin();
for (; it != streams_.end(); ++it) {
if (track_id == (*it)->info()->track_id()) {
return (*it)->PushSample(sample).ok();
}
}
LOG(ERROR) << "Track " << track_id << " not found.";
return false;
}
@ -183,7 +213,8 @@ Status Demuxer::Parse() {
int64_t bytes_read = media_file_->Read(buffer_.get(), kBufSize);
if (bytes_read == 0) {
parser_->Flush();
if (!parser_->Flush())
return Status(error::PARSER_FAILURE, "Failed to flush.");
return Status(error::END_OF_STREAM, "");
} else if (bytes_read < 0) {
return Status(error::FILE_FAILURE, "Cannot read file " + file_name_);


@ -7,8 +7,10 @@
#ifndef MEDIA_BASE_DEMUXER_H_
#define MEDIA_BASE_DEMUXER_H_
#include <deque>
#include <vector>
#include "packager/base/compiler_specific.h"
#include "packager/base/memory/ref_counted.h"
#include "packager/base/memory/scoped_ptr.h"
#include "packager/media/base/container_names.h"
@ -70,15 +72,30 @@ class Demuxer {
MediaContainerName container_name() { return container_name_; }
private:
// Parser event handlers.
struct QueuedSample {
QueuedSample(uint32_t track_id, scoped_refptr<MediaSample> sample);
~QueuedSample();
uint32_t track_id;
scoped_refptr<MediaSample> sample;
};
// Parser init event.
void ParserInitEvent(const std::vector<scoped_refptr<StreamInfo> >& streams);
// Parser new sample event handler. Queues the samples if init event has not
// been received, otherwise calls PushSample() to push the sample to
// corresponding stream.
bool NewSampleEvent(uint32_t track_id,
const scoped_refptr<MediaSample>& sample);
// Helper function to push the sample to corresponding stream.
bool PushSample(uint32_t track_id, const scoped_refptr<MediaSample>& sample);
std::string file_name_;
File* media_file_;
bool init_event_received_;
Status init_parsing_status_;
// Queued samples received in NewSampleEvent() before ParserInitEvent().
std::deque<QueuedSample> queued_samples_;
scoped_ptr<MediaParser> parser_;
std::vector<MediaStream*> streams_;
MediaContainerName container_name_;


@ -11,6 +11,7 @@
#include <vector>
#include "packager/base/callback.h"
#include "packager/base/compiler_specific.h"
#include "packager/base/memory/ref_counted.h"
#include "packager/base/memory/scoped_ptr.h"
#include "packager/media/base/container_names.h"
@ -55,11 +56,12 @@ class MediaParser {
/// Flush data currently in the parser and put the parser in a state where it
/// can receive data for a new seek point.
virtual void Flush() = 0;
/// @return true if successful, false otherwise.
virtual bool Flush() WARN_UNUSED_RESULT = 0;
/// Should be called when there is new data to parse.
/// @return true if successful.
virtual bool Parse(const uint8_t* buf, int size) = 0;
virtual bool Parse(const uint8_t* buf, int size) WARN_UNUSED_RESULT = 0;
private:
DISALLOW_COPY_AND_ASSIGN(MediaParser);


@ -166,7 +166,7 @@ void Mp2tMediaParser::Init(
new_sample_cb_ = new_sample_cb;
}
void Mp2tMediaParser::Flush() {
bool Mp2tMediaParser::Flush() {
DVLOG(1) << "Mp2tMediaParser::Flush";
// Flush the buffers and reset the pids.
@ -176,12 +176,13 @@ void Mp2tMediaParser::Flush() {
PidState* pid_state = it->second;
pid_state->Flush();
}
EmitRemainingSamples();
bool result = EmitRemainingSamples();
STLDeleteValues(&pids_);
// Remove any bytes left in the TS buffer.
// (i.e. any partial TS packet => less than 188 bytes).
ts_byte_queue_.Reset();
return result;
}
bool Mp2tMediaParser::Parse(const uint8_t* buf, int size) {


@ -8,6 +8,7 @@
#include <deque>
#include <map>
#include "packager/base/compiler_specific.h"
#include "packager/base/memory/ref_counted.h"
#include "packager/base/memory/scoped_ptr.h"
#include "packager/media/base/byte_queue.h"
@ -32,14 +33,14 @@ class Mp2tMediaParser : public MediaParser {
Mp2tMediaParser();
~Mp2tMediaParser() override;
// MediaParser implementation overrides.
/// @name MediaParser implementation overrides.
/// @{
void Init(const InitCB& init_cb,
const NewSampleCB& new_sample_cb,
KeySource* decryption_key_source) override;
void Flush() override;
bool Parse(const uint8_t* buf, int size) override;
bool Flush() override WARN_UNUSED_RESULT;
bool Parse(const uint8_t* buf, int size) override WARN_UNUSED_RESULT;
/// @}
private:
typedef std::map<int, PidState*> PidMap;


@ -125,7 +125,7 @@ TEST_F(Mp2tMediaParserTest, UnalignedAppend17) {
// Test small, non-segment-aligned appends.
ParseMpeg2TsFile("bear-1280x720.ts", 17);
EXPECT_EQ(video_frame_count_, 80);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
EXPECT_EQ(video_frame_count_, 82);
}
@ -133,7 +133,7 @@ TEST_F(Mp2tMediaParserTest, UnalignedAppend512) {
// Test small, non-segment-aligned appends.
ParseMpeg2TsFile("bear-1280x720.ts", 512);
EXPECT_EQ(video_frame_count_, 80);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
EXPECT_EQ(video_frame_count_, 82);
}
@ -143,7 +143,7 @@ TEST_F(Mp2tMediaParserTest, TimestampWrapAround) {
// (close to 2^33 / 90000) which results in timestamps wrap around
// in the Mpeg2 TS stream.
ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
EXPECT_EQ(video_frame_count_, 82);
EXPECT_GE(video_min_dts_, static_cast<int64_t>(95443 - 1) * kMpeg2Timescale);
EXPECT_LE(video_max_dts_, static_cast<int64_t>(95443 + 4) * kMpeg2Timescale);


@ -119,10 +119,11 @@ void MP4MediaParser::Reset() {
mdat_tail_ = 0;
}
void MP4MediaParser::Flush() {
bool MP4MediaParser::Flush() {
DCHECK_NE(state_, kWaitingForInit);
Reset();
ChangeState(kParsingBoxes);
return true;
}
bool MP4MediaParser::Parse(const uint8_t* buf, int size) {


@ -40,8 +40,8 @@ class MP4MediaParser : public MediaParser {
void Init(const InitCB& init_cb,
const NewSampleCB& new_sample_cb,
KeySource* decryption_key_source) override;
void Flush() override;
bool Parse(const uint8_t* buf, int size) override;
bool Flush() override WARN_UNUSED_RESULT;
bool Parse(const uint8_t* buf, int size) override WARN_UNUSED_RESULT;
/// @}
/// Handles ISO-BMFF containers which have the 'moov' box trailing the


@ -194,7 +194,7 @@ TEST_F(MP4MediaParserTest, Flush) {
std::vector<uint8_t> buffer = ReadTestDataFile("bear-640x360-av_frag.mp4");
EXPECT_TRUE(AppendDataInPieces(buffer.data(), 65536, 512));
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
EXPECT_EQ(2u, num_streams_);
EXPECT_NE(0u, num_samples_);
num_samples_ = 0;
@ -214,7 +214,7 @@ TEST_F(MP4MediaParserTest, NoMoovAfterFlush) {
std::vector<uint8_t> buffer = ReadTestDataFile("bear-640x360-av_frag.mp4");
EXPECT_TRUE(AppendDataInPieces(buffer.data(), buffer.size(), 512));
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
const int kFirstMoofOffset = 1308;
EXPECT_TRUE(AppendDataInPieces(


@ -103,11 +103,12 @@ void WebMClusterParser::Reset() {
ResetTextTracks();
}
void WebMClusterParser::Flush() {
bool WebMClusterParser::Flush() {
// Estimate the duration of the last frame if necessary.
audio_.ApplyDurationEstimateIfNeeded();
video_.ApplyDurationEstimateIfNeeded();
bool audio_result = audio_.ApplyDurationEstimateIfNeeded();
bool video_result = video_.ApplyDurationEstimateIfNeeded();
Reset();
return audio_result && video_result;
}
int WebMClusterParser::Parse(const uint8_t* buf, int size) {
@ -524,9 +525,9 @@ bool WebMClusterParser::Track::EmitBuffer(
return EmitBufferHelp(buffer);
}
void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
bool WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
if (!last_added_buffer_missing_duration_.get())
return;
return true;
int64_t estimated_duration = GetDurationEstimate();
last_added_buffer_missing_duration_->set_duration(estimated_duration);
@ -544,8 +545,10 @@ void WebMClusterParser::Track::ApplyDurationEstimateIfNeeded() {
// Don't use the applied duration as a future estimation (don't use
// EmitBufferHelp() here.)
new_sample_cb_.Run(track_num_, last_added_buffer_missing_duration_);
if (!new_sample_cb_.Run(track_num_, last_added_buffer_missing_duration_))
return false;
last_added_buffer_missing_duration_ = NULL;
return true;
}
void WebMClusterParser::Track::Reset() {
@ -583,8 +586,7 @@ bool WebMClusterParser::Track::EmitBufferHelp(
}
}
new_sample_cb_.Run(track_num_, buffer);
return true;
return new_sample_cb_.Run(track_num_, buffer);
}
int64_t WebMClusterParser::Track::GetDurationEstimate() {


@ -10,6 +10,7 @@
#include <set>
#include <string>
#include "packager/base/compiler_specific.h"
#include "packager/base/memory/scoped_ptr.h"
#include "packager/media/base/decryptor_source.h"
#include "packager/media/base/media_parser.h"
@ -55,7 +56,7 @@ class WebMClusterParser : public WebMParserClient {
// for this buffer using helper function GetDurationEstimate() then emits it
// and unsets |last_added_buffer_missing_duration_| (This method helps
// stream parser emit all buffers in a media segment).
void ApplyDurationEstimateIfNeeded();
bool ApplyDurationEstimateIfNeeded();
// Clears all buffer state, including any possibly held-aside buffer that
// was missing duration.
@ -135,7 +136,8 @@ class WebMClusterParser : public WebMParserClient {
/// Flush data currently in the parser and reset the parser so it can accept a
/// new cluster.
void Flush();
/// @return true on success, false otherwise.
bool Flush() WARN_UNUSED_RESULT;
/// Parses a WebM cluster element in |buf|.
/// @return -1 if the parse fails.


@ -555,7 +555,7 @@ TEST_F(WebMClusterParserTest, TracksWithSampleMissingDuration) {
// The last audio (5th) and the last video (3rd) are emitted on flush with
// duration estimated - estimated to be default duration if it is specified,
// otherwise estimated from earlier frames.
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
EXPECT_TRUE(VerifyBuffers(&kExpectedBlockInfo[block_count - 2], 2));
}
@ -699,7 +699,7 @@ TEST_F(WebMClusterParserTest, IgnoredTracks) {
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(kOutputBlockInfo, output_block_count));
}
@ -729,7 +729,7 @@ TEST_F(WebMClusterParserTest, ParseTextTracks) {
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(kInputBlockInfo, input_block_count));
}
@ -844,7 +844,7 @@ TEST_F(WebMClusterParserTest, ParseEncryptedBlock) {
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_EQ(1UL, video_buffers_.size());
scoped_refptr<MediaSample> buffer = video_buffers_[0];
EXPECT_EQ(std::vector<uint8_t>(
@ -886,7 +886,7 @@ TEST_F(WebMClusterParserTest, ParseClearFrameInEncryptedTrack) {
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_EQ(1UL, video_buffers_.size());
scoped_refptr<MediaSample> buffer = video_buffers_[0];
EXPECT_EQ(std::vector<uint8_t>(
@ -965,7 +965,7 @@ TEST_F(WebMClusterParserTest, ParseWithDefaultDurationsSimpleBlocks) {
ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count - 2));
// The last audio and video are emitted on flush with duration estimated -
// estimated to be default_duration since it is present.
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(&kBlockInfo[block_count - 2], 2));
}
@ -1017,7 +1017,7 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsSimpleBlocks) {
ASSERT_TRUE(VerifyBuffers(&kBlockInfo1[block_count1 - 2], 2));
// Now flush and verify blocks in cluster2 are emitted.
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(kBlockInfo2, block_count2));
}
@ -1069,7 +1069,7 @@ TEST_F(WebMClusterParserTest, ParseWithoutAnyDurationsBlockGroups) {
ASSERT_TRUE(VerifyBuffers(&kBlockInfo1[block_count1 - 2], 2));
// Now flush and verify blocks in cluster2 are emitted.
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(kBlockInfo2, block_count2));
}
@ -1093,7 +1093,7 @@ TEST_F(WebMClusterParserTest,
scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));
}
@ -1110,7 +1110,7 @@ TEST_F(WebMClusterParserTest,
scoped_ptr<Cluster> cluster(CreateCluster(0, kBlockInfo, block_count));
int result = parser_->Parse(cluster->data(), cluster->size());
EXPECT_EQ(cluster->size(), result);
parser_->Flush();
EXPECT_TRUE(parser_->Flush());
ASSERT_TRUE(VerifyBuffers(kBlockInfo, block_count));
}


@ -41,15 +41,17 @@ void WebMMediaParser::Init(const InitCB& init_cb,
ignore_text_tracks_ = true;
}
void WebMMediaParser::Flush() {
bool WebMMediaParser::Flush() {
DCHECK_NE(state_, kWaitingForInit);
byte_queue_.Reset();
bool result = true;
if (cluster_parser_)
cluster_parser_->Flush();
result = cluster_parser_->Flush();
if (state_ == kParsingClusters) {
ChangeState(kParsingHeaders);
}
return result;
}
bool WebMMediaParser::Parse(const uint8_t* buf, int size) {


@ -6,6 +6,7 @@
#define MEDIA_FORMATS_WEBM_WEBM_MEDIA_PARSER_H_
#include "packager/base/callback_forward.h"
#include "packager/base/compiler_specific.h"
#include "packager/base/memory/ref_counted.h"
#include "packager/media/base/byte_queue.h"
#include "packager/media/base/media_parser.h"
@ -20,12 +21,14 @@ class WebMMediaParser : public MediaParser {
WebMMediaParser();
~WebMMediaParser() override;
/// StreamParser implementation.
/// @name MediaParser implementation overrides.
/// @{
void Init(const InitCB& init_cb,
const NewSampleCB& new_sample_cb,
KeySource* decryption_key_source) override;
void Flush() override;
bool Parse(const uint8_t* buf, int size) override;
bool Flush() override WARN_UNUSED_RESULT;
bool Parse(const uint8_t* buf, int size) override WARN_UNUSED_RESULT;
/// @}
private:
enum State {


@ -225,10 +225,10 @@ void WebVttMediaParser::Init(const InitCB& init_cb,
new_sample_cb_ = new_sample_cb;
}
void WebVttMediaParser::Flush() {
bool WebVttMediaParser::Flush() {
// If not in one of these states just be ready for more data.
if (state_ != kCuePayload && state_ != kComment)
return;
return true;
if (!data_.empty()) {
// If it was in the middle of the payload and the stream finished, then this
@ -241,9 +241,10 @@ void WebVttMediaParser::Flush() {
data_.clear();
}
new_sample_cb_.Run(kTrackId, CueToMediaSample(current_cue_));
bool result = new_sample_cb_.Run(kTrackId, CueToMediaSample(current_cue_));
current_cue_ = Cue();
state_ = kCueIdentifierOrTimingOrComment;
return result;
}
bool WebVttMediaParser::Parse(const uint8_t* buf, int size) {
@ -353,7 +354,10 @@ bool WebVttMediaParser::Parse(const uint8_t* buf, int size) {
case kCuePayload: {
if (line.empty()) {
state_ = kCueIdentifierOrTimingOrComment;
new_sample_cb_.Run(kTrackId, CueToMediaSample(current_cue_));
if (!new_sample_cb_.Run(kTrackId, CueToMediaSample(current_cue_))) {
state_ = kParseError;
return false;
}
current_cue_ = Cue();
break;
}
@ -364,7 +368,10 @@ bool WebVttMediaParser::Parse(const uint8_t* buf, int size) {
case kComment: {
if (line.empty()) {
state_ = kCueIdentifierOrTimingOrComment;
new_sample_cb_.Run(kTrackId, CueToMediaSample(current_cue_));
if (!new_sample_cb_.Run(kTrackId, CueToMediaSample(current_cue_))) {
state_ = kParseError;
return false;
}
current_cue_ = Cue();
break;
}


@ -7,12 +7,13 @@
#ifndef MEDIA_FORMATS_WEBVTT_WEBVTT_MEDIA_PARSER_H_
#define MEDIA_FORMATS_WEBVTT_WEBVTT_MEDIA_PARSER_H_
#include "packager/media/base/media_parser.h"
#include <stdint.h>
#include <string>
#include <vector>
#include "packager/base/compiler_specific.h"
#include "packager/media/base/media_parser.h"
namespace edash_packager {
namespace media {
@ -43,8 +44,8 @@ class WebVttMediaParser : public MediaParser {
void Init(const InitCB& init_cb,
const NewSampleCB& new_sample_cb,
KeySource* decryption_key_source) override;
void Flush() override;
bool Parse(const uint8_t* buf, int size) override;
bool Flush() override WARN_UNUSED_RESULT;
bool Parse(const uint8_t* buf, int size) override WARN_UNUSED_RESULT;
/// @}
private:


@ -61,7 +61,7 @@ TEST_F(WebVttMediaParserTest, ParseOneCue) {
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
// Verify that different types of line breaks work.
@ -78,7 +78,7 @@ TEST_F(WebVttMediaParserTest, DifferentLineBreaks) {
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
TEST_F(WebVttMediaParserTest, ParseMultpleCues) {
@ -99,7 +99,7 @@ TEST_F(WebVttMediaParserTest, ParseMultpleCues) {
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
MATCHER_P2(MatchesStartTimeAndDuration, start_time, duration, "") {
@ -123,7 +123,7 @@ TEST_F(WebVttMediaParserTest, VerifyTimingParsing) {
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
// Expect parse failure if hour part of the timestamp is too short.
@ -192,7 +192,7 @@ TEST_F(WebVttMediaParserTest, VerifyCuePayload) {
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
// Verify that a sample can be created from multiple calls to Parse(), i.e. one
@ -213,7 +213,7 @@ TEST_F(WebVttMediaParserTest, PartialParse) {
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt) + 8,
arraysize(kWebVtt) - 1 - 8));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
// Verify that metadata header with --> is rejected.
@ -226,7 +226,7 @@ TEST_F(WebVttMediaParserTest, BadMetadataHeader) {
InitializeParser();
EXPECT_FALSE(parser_.Parse(reinterpret_cast<const uint8_t*>(kBadWebVtt),
arraysize(kBadWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
MATCHER_P(MatchesComment, comment, "") {
@ -253,7 +253,7 @@ TEST_F(WebVttMediaParserTest, Comment) {
InitializeParser();
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
// Verify that comment with --> is rejected.
@ -269,7 +269,7 @@ TEST_F(WebVttMediaParserTest, BadComment) {
InitializeParser();
EXPECT_FALSE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
MATCHER_P(HeaderMatches, header, "") {
@ -295,7 +295,7 @@ TEST_F(WebVttMediaParserTest, Header) {
InitializeParser();
EXPECT_TRUE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
// Verify that if timing is not present after an identifier, the parser errors.
@ -311,7 +311,7 @@ TEST_F(WebVttMediaParserTest, NoTimingAfterIdentifier) {
InitializeParser();
EXPECT_FALSE(parser_.Parse(reinterpret_cast<const uint8_t*>(kWebVtt),
arraysize(kWebVtt) - 1));
parser_.Flush();
EXPECT_TRUE(parser_.Flush());
}
} // namespace media


@ -469,7 +469,9 @@ bool WvmMediaParser::Parse(const uint8_t* buf, int size) {
if (!DemuxNextPes(true)) {
return false;
}
Flush();
if (!Flush()) {
return false;
}
// Reset.
dts_ = pts_ = 0;
parse_state_ = StartCode1;
@ -515,7 +517,7 @@ bool WvmMediaParser::EmitPendingSamples() {
return true;
}
void WvmMediaParser::Flush() {
bool WvmMediaParser::Flush() {
// Flush the last audio and video sample for current program.
// Reset the streamID when successfully emitted.
if (prev_media_sample_data_.audio_sample != NULL) {
@ -523,6 +525,7 @@ void WvmMediaParser::Flush() {
prev_media_sample_data_.audio_sample)) {
LOG(ERROR) << "Did not emit last sample for audio stream with ID = "
<< prev_pes_stream_id_;
return false;
}
}
if (prev_media_sample_data_.video_sample != NULL) {
@ -530,8 +533,10 @@ void WvmMediaParser::Flush() {
prev_media_sample_data_.video_sample)) {
LOG(ERROR) << "Did not emit last sample for video stream with ID = "
<< prev_pes_stream_id_;
return false;
}
}
return true;
}
bool WvmMediaParser::ParseIndexEntry() {


@ -11,6 +11,7 @@
#include <string>
#include <vector>
#include "packager/base/compiler_specific.h"
#include "packager/base/memory/scoped_ptr.h"
#include "packager/media/base/media_parser.h"
#include "packager/media/base/network_util.h"
@ -52,14 +53,14 @@ class WvmMediaParser : public MediaParser {
WvmMediaParser();
~WvmMediaParser() override;
// MediaParser implementation overrides.
/// @name MediaParser implementation overrides.
/// @{
void Init(const InitCB& init_cb,
const NewSampleCB& new_sample_cb,
KeySource* decryption_key_source) override;
void Flush() override;
bool Parse(const uint8_t* buf, int size) override;
bool Flush() override WARN_UNUSED_RESULT;
bool Parse(const uint8_t* buf, int size) override WARN_UNUSED_RESULT;
/// @}
private:
enum Tag {