Modified mp2t::MediaParser to work with the packaging SDK.

Added transport stream test data.

Change-Id: I2f20d0a67eb5a1157ceed08af67368895016170c
Author: Thomas Inskip
Date:   2014-04-10 12:57:10 -07:00
Parent: 20e66b2109
Commit: c5f1e5eb7a

17 changed files with 275 additions and 528 deletions
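For orientation, here is a minimal sketch of how the reworked mp2t::MediaParser is driven, modelled on the updated unit test further down in this diff. The callback signatures are taken from the diff itself; the surrounding TsDemuxerSketch class, its method names, and the exact set of includes are illustrative assumptions, not code from this commit.

#include <vector>

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_sample.h"
#include "media/base/stream_info.h"
#include "media/formats/mp2t/mp2t_media_parser.h"

// Illustrative driver for the reworked parser (not part of this commit).
class TsDemuxerSketch {
 public:
  bool Demux(const uint8* data, int size) {
    media::mp2t::MediaParser parser;
    parser.Init(
        base::Bind(&TsDemuxerSketch::OnInit, base::Unretained(this)),
        base::Bind(&TsDemuxerSketch::OnNewSample, base::Unretained(this)),
        base::Bind(&TsDemuxerSketch::OnKeyNeeded, base::Unretained(this)));
    // OnInit fires once every audio/video PID has reported its StreamInfo;
    // samples are then delivered per PID through OnNewSample.
    if (!parser.Parse(data, size))
      return false;
    parser.Flush();  // Emits any samples still queued per PID.
    return true;
  }

 private:
  void OnInit(const std::vector<scoped_refptr<media::StreamInfo> >& streams) {}

  bool OnNewSample(uint32 track_id,
                   const scoped_refptr<media::MediaSample>& sample) {
    return true;  // Returning false propagates an error out of Parse().
  }

  void OnKeyNeeded(media::MediaContainerName container,
                   scoped_ptr<uint8[]> init_data,
                   int init_data_size) {}
};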

View File

@@ -12,14 +12,17 @@
 namespace media {

 class MediaSample;
+class StreamInfo;

 namespace mp2t {

 class EsParser {
  public:
-  typedef base::Callback<void(scoped_refptr<MediaSample>&)> EmitSampleCB;
+  typedef base::Callback<void(scoped_refptr<StreamInfo>&)> NewStreamInfoCB;
+  typedef base::Callback<void(uint32, scoped_refptr<MediaSample>&)> EmitSampleCB;

-  EsParser(uint32 track_id) : track_id_(track_id) {}
+  EsParser(uint32 pid)
+      : pid_(pid) {}
   virtual ~EsParser() {}

   // ES parsing.
@@ -32,10 +35,10 @@ class EsParser {
   // Reset the state of the ES parser.
   virtual void Reset() = 0;

-  uint32 track_id() { return track_id_; }
+  uint32 pid() { return pid_; }

  private:
-  uint32 track_id_;
+  uint32 pid_;
 };

 } // namespace mp2t
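The interface change above replaces the single track-id/EmitSampleCB pair with a PID-keyed parser that reports stream configuration through NewStreamInfoCB and tags every emitted sample with its PID. A minimal sketch of a subclass wired this way follows; MyEsParser, its private helpers, and the assumed set of pure-virtual parsing hooks are illustrative only, not code from this commit.

// Illustrative subclass of the revised EsParser interface (not part of this
// commit). The parsing hooks are stubbed out; only the callback wiring
// mirrors what EsParserAdts/EsParserH264 do in the hunks below.
class MyEsParser : public EsParser {
 public:
  MyEsParser(uint32 pid,
             const NewStreamInfoCB& new_stream_info_cb,
             const EmitSampleCB& emit_sample_cb)
      : EsParser(pid),
        new_stream_info_cb_(new_stream_info_cb),
        emit_sample_cb_(emit_sample_cb) {}
  virtual ~MyEsParser() {}

  // Assumed pure-virtual set; the real interface may differ slightly.
  virtual bool Parse(const uint8* buf, int size, int64 pts, int64 dts) OVERRIDE {
    return true;
  }
  virtual void Flush() OVERRIDE {}
  virtual void Reset() OVERRIDE {}

 private:
  void OnConfigReady(scoped_refptr<StreamInfo>& info) {
    new_stream_info_cb_.Run(info);       // e.g. an Audio/VideoStreamInfo.
  }
  void OnSampleReady(scoped_refptr<MediaSample>& sample) {
    emit_sample_cb_.Run(pid(), sample);  // Samples are now tagged with their PID.
  }

  NewStreamInfoCB new_stream_info_cb_;
  EmitSampleCB emit_sample_cb_;

  DISALLOW_COPY_AND_ASSIGN(MyEsParser);
};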

View File

@@ -98,12 +98,12 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
 namespace mp2t {

 EsParserAdts::EsParserAdts(
-    uint32 track_id,
-    const NewAudioConfigCB& new_audio_config_cb,
+    uint32 pid,
+    const NewStreamInfoCB& new_stream_info_cb,
     const EmitSampleCB& emit_sample_cb,
     bool sbr_in_mimetype)
-  : EsParser(track_id),
-    new_audio_config_cb_(new_audio_config_cb),
+  : EsParser(pid),
+    new_stream_info_cb_(new_stream_info_cb),
     emit_sample_cb_(emit_sample_cb),
     sbr_in_mimetype_(sbr_in_mimetype) {
 }
@@ -170,7 +170,7 @@ bool EsParserAdts::Parse(const uint8* buf, int size, int64 pts, int64 dts) {
     sample->set_pts(current_pts);
     sample->set_dts(current_pts);
     sample->set_duration(frame_duration);
-    emit_sample_cb_.Run(sample);
+    emit_sample_cb_.Run(pid(), sample);

     // Update the PTS of the next frame.
     audio_timestamp_helper_->AddFrames(kSamplesPerAACFrame);
@@ -229,9 +229,9 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
       ? std::min(2 * samples_per_second, 48000)
       : samples_per_second;

-  last_audio_decoder_config_ = scoped_refptr<AudioStreamInfo>(
+  last_audio_decoder_config_ = scoped_refptr<StreamInfo>(
       new AudioStreamInfo(
-          track_id(),
+          pid(),
           kMpeg2Timescale,
           kInfiniteDuration,
           kCodecAAC,
@@ -260,7 +260,7 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
   }

   // Audio config notification.
-  new_audio_config_cb_.Run(last_audio_decoder_config_);
+  new_stream_info_cb_.Run(last_audio_decoder_config_);

   return true;
 }
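The UpdateAudioConfiguration hunk above keeps the existing SBR handling: when SBR is signalled via the mime type, the reported sampling rate is doubled and capped at 48 kHz (HE-AAC implicit signalling). A standalone restatement of that rule, with an illustrative function name not used by the parser, is:

#include <algorithm>

// Restates the sbr_in_mimetype rule from the hunk above; the function name
// is illustrative, not part of the parser.
int ExtendedSamplingRate(int samples_per_second, bool sbr_in_mimetype) {
  return sbr_in_mimetype ? std::min(2 * samples_per_second, 48000)
                         : samples_per_second;
}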

View File

@@ -25,11 +25,8 @@ namespace mp2t {
 class EsParserAdts : public EsParser {
  public:
-  typedef base::Callback<void(
-      scoped_refptr<AudioStreamInfo>&)> NewAudioConfigCB;
-
-  EsParserAdts(uint32 track_id,
-               const NewAudioConfigCB& new_audio_config_cb,
+  EsParserAdts(uint32 pid,
+               const NewStreamInfoCB& new_stream_info_cb,
                const EmitSampleCB& emit_sample_cb,
                bool sbr_in_mimetype);

   virtual ~EsParserAdts();
@@ -57,7 +54,7 @@ class EsParserAdts : public EsParser {
   // Callbacks:
   // - to signal a new audio configuration,
   // - to send ES buffers.
-  NewAudioConfigCB new_audio_config_cb_;
+  NewStreamInfoCB new_stream_info_cb_;
   EmitSampleCB emit_sample_cb_;

   // True when AAC SBR extension is signalled in the mimetype
@@ -73,7 +70,7 @@ class EsParserAdts : public EsParser {
   // Interpolated PTS for frames that don't have one.
   scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;

-  scoped_refptr<AudioStreamInfo> last_audio_decoder_config_;
+  scoped_refptr<StreamInfo> last_audio_decoder_config_;

   DISALLOW_COPY_AND_ASSIGN(EsParserAdts);
 };

View File

@@ -35,11 +35,11 @@ const uint8 kCommonNaluLengthSize = 4;
 } // anonymous namespace

 EsParserH264::EsParserH264(
-    uint32 track_id,
-    const NewVideoConfigCB& new_video_config_cb,
+    uint32 pid,
+    const NewStreamInfoCB& new_stream_info_cb,
     const EmitSampleCB& emit_sample_cb)
-  : EsParser(track_id),
-    new_video_config_cb_(new_video_config_cb),
+  : EsParser(pid),
+    new_stream_info_cb_(new_stream_info_cb),
     emit_sample_cb_(emit_sample_cb),
     es_queue_(new media::OffsetByteQueue()),
     h264_parser_(new H264Parser()),
@@ -95,7 +95,7 @@ void EsParserH264::Reset() {
   current_access_unit_pos_ = 0;
   next_access_unit_pos_ = 0;
   timing_desc_list_.clear();
-  last_video_decoder_config_ = scoped_refptr<VideoStreamInfo>();
+  last_video_decoder_config_ = scoped_refptr<StreamInfo>();
 }

 bool EsParserH264::FindAUD(int64* stream_pos) {
@@ -282,7 +282,7 @@ bool EsParserH264::EmitFrame(int64 access_unit_pos, int access_unit_size,
       is_key_frame);
   media_sample->set_dts(current_timing_desc.dts);
   media_sample->set_pts(current_timing_desc.pts);
-  emit_sample_cb_.Run(media_sample);
+  emit_sample_cb_.Run(pid(), media_sample);

   return true;
 }
@@ -299,9 +299,9 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
   uint16 width = (sps->pic_width_in_mbs_minus1 + 1) * 16;
   uint16 height = (sps->pic_height_in_map_units_minus1 + 1) * 16;

-  last_video_decoder_config_ = scoped_refptr<VideoStreamInfo>(
+  last_video_decoder_config_ = scoped_refptr<StreamInfo>(
       new VideoStreamInfo(
-          track_id(),
+          pid(),
           kMpeg2Timescale,
           kInfiniteDuration,
           kCodecH264,
@@ -323,7 +323,7 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
            << " height=" << sps->sar_height;

   // Video config notification.
-  new_video_config_cb_.Run(last_video_decoder_config_);
+  new_stream_info_cb_.Run(last_video_decoder_config_);

   return true;
 }

View File

@@ -34,11 +34,8 @@ namespace mp2t {
 //
 class EsParserH264 : public EsParser {
  public:
-  typedef base::Callback<void(
-      scoped_refptr<VideoStreamInfo>&)> NewVideoConfigCB;
-
-  EsParserH264(uint32 track_id,
-               const NewVideoConfigCB& new_video_config_cb,
+  EsParserH264(uint32 pid,
+               const NewStreamInfoCB& new_stream_info_cb,
                const EmitSampleCB& emit_sample_cb);

   virtual ~EsParserH264();
@@ -74,7 +71,7 @@ class EsParserH264 : public EsParser {
   bool UpdateVideoDecoderConfig(const filters::H264SPS* sps);

   // Callbacks to pass the stream configuration and the frames.
-  NewVideoConfigCB new_video_config_cb_;
+  NewStreamInfoCB new_stream_info_cb_;
   EmitSampleCB emit_sample_cb_;

   // Bytes of the ES stream that have not been emitted yet.
@@ -89,7 +86,7 @@ class EsParserH264 : public EsParser {
   int64 next_access_unit_pos_;

   // Last video decoder config.
-  scoped_refptr<VideoStreamInfo> last_video_decoder_config_;
+  scoped_refptr<StreamInfo> last_video_decoder_config_;
 };

 } // namespace mp2t

View File

@@ -129,11 +129,11 @@ class EsParserH264Test : public testing::Test {
   void LoadStream(const char* filename);
   void ProcessPesPackets(const std::vector<Packet>& pes_packets);

-  void EmitSample(scoped_refptr<MediaSample>& sample) {
+  void EmitSample(uint32 pid, scoped_refptr<MediaSample>& sample) {
     sample_count_++;
   }

-  void NewVideoConfig(scoped_refptr<VideoStreamInfo>& config) {
+  void NewVideoConfig(scoped_refptr<StreamInfo>& config) {
   }

   size_t sample_count() const { return sample_count_; }

View File

@@ -24,6 +24,18 @@
         'es_parser_adts.h',
         'es_parser_h264.cc',
         'es_parser_h264.h',
+        'mp2t_media_parser.cc',
+        'mp2t_media_parser.h',
+        'ts_packet.cc',
+        'ts_packet.h',
+        'ts_section_pat.cc',
+        'ts_section_pat.h',
+        'ts_section_pes.cc',
+        'ts_section_pes.h',
+        'ts_section_pmt.cc',
+        'ts_section_pmt.h',
+        'ts_section_psi.cc',
+        'ts_section_psi.h',
       ],
       'dependencies': [
         '../../base/media_base.gyp:base',
@@ -34,12 +46,14 @@
       'type': '<(gtest_target_type)',
       'sources': [
         'es_parser_h264_unittest.cc',
+        'mp2t_media_parser_unittest.cc',
       ],
       'dependencies': [
         '../../../testing/gtest.gyp:gtest',
         '../../../testing/gmock.gyp:gmock',
         '../../filters/filters.gyp:filters',
         '../../test/media_test.gyp:media_test_support',
+        '../mpeg/mpeg.gyp:mpeg',
         'mp2t',
       ]
     },

View File

@ -2,16 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be // Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. // found in the LICENSE file.
#include "media/formats/mp2t/mp2t_stream_parser.h" #include "media/formats/mp2t/mp2t_media_parser.h"
#include "base/bind.h" #include "base/bind.h"
#include "base/memory/scoped_ptr.h" #include "base/memory/scoped_ptr.h"
#include "base/stl_util.h" #include "base/stl_util.h"
#include "media/base/audio_decoder_config.h" #include "media/base/media_sample.h"
#include "media/base/buffers.h" #include "media/base/stream_info.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/mp2t/es_parser.h" #include "media/formats/mp2t/es_parser.h"
#include "media/formats/mp2t/es_parser_adts.h" #include "media/formats/mp2t/es_parser_adts.h"
#include "media/formats/mp2t/es_parser_h264.h" #include "media/formats/mp2t/es_parser_h264.h"
@ -41,7 +38,7 @@ class PidState {
kPidVideoPes, kPidVideoPes,
}; };
PidState(int pid, PidType pid_tyoe, PidState(int pid, PidType pid_type,
scoped_ptr<TsSection> section_parser); scoped_ptr<TsSection> section_parser);
// Extract the content of the TS packet and parse it. // Extract the content of the TS packet and parse it.
@ -61,6 +58,11 @@ class PidState {
PidType pid_type() const { return pid_type_; } PidType pid_type() const { return pid_type_; }
scoped_refptr<StreamInfo>& config() { return config_; }
void set_config(scoped_refptr<StreamInfo>& config) { config_ = config; }
SampleQueue& sample_queue() { return sample_queue_; }
private: private:
void ResetState(); void ResetState();
@ -69,8 +71,9 @@ class PidState {
scoped_ptr<TsSection> section_parser_; scoped_ptr<TsSection> section_parser_;
bool enable_; bool enable_;
int continuity_counter_; int continuity_counter_;
scoped_refptr<StreamInfo> config_;
SampleQueue sample_queue_;
}; };
PidState::PidState(int pid, PidType pid_type, PidState::PidState(int pid, PidType pid_type,
@ -95,6 +98,7 @@ bool PidState::PushTsPacket(const TsPacket& ts_packet) {
if (continuity_counter_ >= 0 && if (continuity_counter_ >= 0 &&
ts_packet.continuity_counter() != expected_continuity_counter) { ts_packet.continuity_counter() != expected_continuity_counter) {
DVLOG(1) << "TS discontinuity detected for pid: " << pid_; DVLOG(1) << "TS discontinuity detected for pid: " << pid_;
// TODO(tinskip): Handle discontinuity better.
return false; return false;
} }
@ -104,7 +108,7 @@ bool PidState::PushTsPacket(const TsPacket& ts_packet) {
ts_packet.payload_size()); ts_packet.payload_size());
// At the minimum, when parsing failed, auto reset the section parser. // At the minimum, when parsing failed, auto reset the section parser.
// Components that use the StreamParser can take further action if needed. // Components that use the MediaParser can take further action if needed.
if (!status) { if (!status) {
DVLOG(1) << "Parsing failed for pid = " << pid_; DVLOG(1) << "Parsing failed for pid = " << pid_;
ResetState(); ResetState();
@ -139,59 +143,32 @@ void PidState::ResetState() {
continuity_counter_ = -1; continuity_counter_ = -1;
} }
Mp2tStreamParser::BufferQueueWithConfig::BufferQueueWithConfig( MediaParser::MediaParser()
bool is_cfg_sent, : sbr_in_mimetype_(false),
const AudioDecoderConfig& audio_cfg, is_initialized_(false) {
const VideoDecoderConfig& video_cfg)
: is_config_sent(is_cfg_sent),
audio_config(audio_cfg),
video_config(video_cfg) {
} }
Mp2tStreamParser::BufferQueueWithConfig::~BufferQueueWithConfig() { MediaParser::~MediaParser() {
}
Mp2tStreamParser::Mp2tStreamParser(bool sbr_in_mimetype)
: sbr_in_mimetype_(sbr_in_mimetype),
selected_audio_pid_(-1),
selected_video_pid_(-1),
is_initialized_(false),
segment_started_(false),
first_video_frame_in_segment_(true) {
}
Mp2tStreamParser::~Mp2tStreamParser() {
STLDeleteValues(&pids_); STLDeleteValues(&pids_);
} }
void Mp2tStreamParser::Init( void MediaParser::Init(
const InitCB& init_cb, const InitCB& init_cb,
const NewConfigCB& config_cb, const NewSampleCB& new_sample_cb,
const NewBuffersCB& new_buffers_cb, const NeedKeyCB& need_key_cb) {
bool /* ignore_text_tracks */ ,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
DCHECK(!is_initialized_); DCHECK(!is_initialized_);
DCHECK(init_cb_.is_null()); DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null()); DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null()); DCHECK(!new_sample_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
DCHECK(!need_key_cb.is_null()); DCHECK(!need_key_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
init_cb_ = init_cb; init_cb_ = init_cb;
config_cb_ = config_cb; new_sample_cb_ = new_sample_cb;
new_buffers_cb_ = new_buffers_cb;
need_key_cb_ = need_key_cb; need_key_cb_ = need_key_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
log_cb_ = log_cb;
} }
void Mp2tStreamParser::Flush() { void MediaParser::Flush() {
DVLOG(1) << "Mp2tStreamParser::Flush"; DVLOG(1) << "MediaParser::Flush";
// Flush the buffers and reset the pids. // Flush the buffers and reset the pids.
for (std::map<int, PidState*>::iterator it = pids_.begin(); for (std::map<int, PidState*>::iterator it = pids_.begin();
@ -199,29 +176,17 @@ void Mp2tStreamParser::Flush() {
DVLOG(1) << "Flushing PID: " << it->first; DVLOG(1) << "Flushing PID: " << it->first;
PidState* pid_state = it->second; PidState* pid_state = it->second;
pid_state->Flush(); pid_state->Flush();
delete pid_state;
} }
pids_.clear(); EmitRemainingSamples();
EmitRemainingBuffers(); STLDeleteValues(&pids_);
buffer_queue_chain_.clear();
// End of the segment.
// Note: does not need to invoke |end_of_segment_cb_| since flushing the
// stream parser already involves the end of the current segment.
segment_started_ = false;
first_video_frame_in_segment_ = true;
// Remove any bytes left in the TS buffer. // Remove any bytes left in the TS buffer.
// (i.e. any partial TS packet => less than 188 bytes). // (i.e. any partial TS packet => less than 188 bytes).
ts_byte_queue_.Reset(); ts_byte_queue_.Reset();
// Reset the selected PIDs.
selected_audio_pid_ = -1;
selected_video_pid_ = -1;
} }
bool Mp2tStreamParser::Parse(const uint8* buf, int size) { bool MediaParser::Parse(const uint8* buf, int size) {
DVLOG(1) << "Mp2tStreamParser::Parse size=" << size; DVLOG(1) << "MediaParser::Parse size=" << size;
// Add the data to the parser state. // Add the data to the parser state.
ts_byte_queue_.Push(buf, size); ts_byte_queue_.Push(buf, size);
@ -260,7 +225,7 @@ bool Mp2tStreamParser::Parse(const uint8* buf, int size) {
// Create the PAT state here if needed. // Create the PAT state here if needed.
scoped_ptr<TsSection> pat_section_parser( scoped_ptr<TsSection> pat_section_parser(
new TsSectionPat( new TsSectionPat(
base::Bind(&Mp2tStreamParser::RegisterPmt, base::Bind(&MediaParser::RegisterPmt,
base::Unretained(this)))); base::Unretained(this))));
scoped_ptr<PidState> pat_pid_state( scoped_ptr<PidState> pat_pid_state(
new PidState(ts_packet->pid(), PidState::kPidPat, new PidState(ts_packet->pid(), PidState::kPidPat,
@ -282,13 +247,11 @@ bool Mp2tStreamParser::Parse(const uint8* buf, int size) {
ts_byte_queue_.Pop(TsPacket::kPacketSize); ts_byte_queue_.Pop(TsPacket::kPacketSize);
} }
RCHECK(FinishInitializationIfNeeded());
// Emit the A/V buffers that kept accumulating during TS parsing. // Emit the A/V buffers that kept accumulating during TS parsing.
return EmitRemainingBuffers(); return EmitRemainingSamples();
} }
void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) { void MediaParser::RegisterPmt(int program_number, int pmt_pid) {
DVLOG(1) << "RegisterPmt:" DVLOG(1) << "RegisterPmt:"
<< " program_number=" << program_number << " program_number=" << program_number
<< " pmt_pid=" << pmt_pid; << " pmt_pid=" << pmt_pid;
@ -308,7 +271,7 @@ void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) {
DVLOG(1) << "Create a new PMT parser"; DVLOG(1) << "Create a new PMT parser";
scoped_ptr<TsSection> pmt_section_parser( scoped_ptr<TsSection> pmt_section_parser(
new TsSectionPmt( new TsSectionPmt(
base::Bind(&Mp2tStreamParser::RegisterPes, base::Bind(&MediaParser::RegisterPes,
base::Unretained(this), pmt_pid))); base::Unretained(this), pmt_pid)));
scoped_ptr<PidState> pmt_pid_state( scoped_ptr<PidState> pmt_pid_state(
new PidState(pmt_pid, PidState::kPidPmt, pmt_section_parser.Pass())); new PidState(pmt_pid, PidState::kPidPmt, pmt_section_parser.Pass()));
@ -316,7 +279,7 @@ void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) {
pids_.insert(std::pair<int, PidState*>(pmt_pid, pmt_pid_state.release())); pids_.insert(std::pair<int, PidState*>(pmt_pid, pmt_pid_state.release()));
} }
void Mp2tStreamParser::RegisterPes(int pmt_pid, void MediaParser::RegisterPes(int pmt_pid,
int pes_pid, int pes_pid,
int stream_type) { int stream_type) {
// TODO(damienv): check there is no mismatch if the entry already exists. // TODO(damienv): check there is no mismatch if the entry already exists.
@ -333,21 +296,19 @@ void Mp2tStreamParser::RegisterPes(int pmt_pid,
if (stream_type == kStreamTypeAVC) { if (stream_type == kStreamTypeAVC) {
es_parser.reset( es_parser.reset(
new EsParserH264( new EsParserH264(
base::Bind(&Mp2tStreamParser::OnVideoConfigChanged, pes_pid,
base::Unretained(this), base::Bind(&MediaParser::OnNewStreamInfo,
pes_pid), base::Unretained(this)),
base::Bind(&Mp2tStreamParser::OnEmitVideoBuffer, base::Bind(&MediaParser::OnEmitSample,
base::Unretained(this), base::Unretained(this))));
pes_pid)));
} else if (stream_type == kStreamTypeAAC) { } else if (stream_type == kStreamTypeAAC) {
es_parser.reset( es_parser.reset(
new EsParserAdts( new EsParserAdts(
base::Bind(&Mp2tStreamParser::OnAudioConfigChanged, pes_pid,
base::Unretained(this), base::Bind(&MediaParser::OnNewStreamInfo,
pes_pid), base::Unretained(this)),
base::Bind(&Mp2tStreamParser::OnEmitAudioBuffer, base::Bind(&MediaParser::OnEmitSample,
base::Unretained(this), base::Unretained(this)),
pes_pid),
sbr_in_mimetype_)); sbr_in_mimetype_));
is_audio = true; is_audio = true;
} else { } else {
@ -362,261 +323,107 @@ void Mp2tStreamParser::RegisterPes(int pmt_pid,
is_audio ? PidState::kPidAudioPes : PidState::kPidVideoPes; is_audio ? PidState::kPidAudioPes : PidState::kPidVideoPes;
scoped_ptr<PidState> pes_pid_state( scoped_ptr<PidState> pes_pid_state(
new PidState(pes_pid, pid_type, pes_section_parser.Pass())); new PidState(pes_pid, pid_type, pes_section_parser.Pass()));
pes_pid_state->Enable();
pids_.insert(std::pair<int, PidState*>(pes_pid, pes_pid_state.release())); pids_.insert(std::pair<int, PidState*>(pes_pid, pes_pid_state.release()));
// A new PES pid has been added, the PID filter might change.
UpdatePidFilter();
} }
void Mp2tStreamParser::UpdatePidFilter() { void MediaParser::OnNewStreamInfo(
// Applies the HLS rule to select the default audio/video PIDs: scoped_refptr<StreamInfo>& new_stream_info) {
// select the audio/video streams with the lowest PID. DCHECK(new_stream_info);
// TODO(damienv): this can be changed when the StreamParser interface DVLOG(1) << "OnVideoConfigChanged for pid=" << new_stream_info->track_id();
// supports multiple audio/video streams.
PidMap::iterator lowest_audio_pid = pids_.end(); PidMap::iterator pid_state = pids_.find(new_stream_info->track_id());
PidMap::iterator lowest_video_pid = pids_.end(); if (pid_state == pids_.end()) {
for (PidMap::iterator it = pids_.begin(); it != pids_.end(); ++it) { LOG(ERROR) << "PID State for new stream not found (pid = "
int pid = it->first; << new_stream_info->track_id() << ").";
PidState* pid_state = it->second; return;
if (pid_state->pid_type() == PidState::kPidAudioPes &&
(lowest_audio_pid == pids_.end() || pid < lowest_audio_pid->first))
lowest_audio_pid = it;
if (pid_state->pid_type() == PidState::kPidVideoPes &&
(lowest_video_pid == pids_.end() || pid < lowest_video_pid->first))
lowest_video_pid = it;
} }
// Enable both the lowest audio and video PIDs. // Set the stream configuration information for the PID.
if (lowest_audio_pid != pids_.end()) { pid_state->second->set_config(new_stream_info);
DVLOG(1) << "Enable audio pid: " << lowest_audio_pid->first;
lowest_audio_pid->second->Enable(); // Finish initialization if all streams have configs.
selected_audio_pid_ = lowest_audio_pid->first; FinishInitializationIfNeeded();
}
if (lowest_video_pid != pids_.end()) {
DVLOG(1) << "Enable video pid: " << lowest_video_pid->first;
lowest_video_pid->second->Enable();
selected_video_pid_ = lowest_video_pid->first;
} }
// Disable all the other audio and video PIDs. bool MediaParser::FinishInitializationIfNeeded() {
for (PidMap::iterator it = pids_.begin(); it != pids_.end(); ++it) {
PidState* pid_state = it->second;
if (it != lowest_audio_pid && it != lowest_video_pid &&
(pid_state->pid_type() == PidState::kPidAudioPes ||
pid_state->pid_type() == PidState::kPidVideoPes))
pid_state->Disable();
}
}
void Mp2tStreamParser::OnVideoConfigChanged(
int pes_pid,
const VideoDecoderConfig& video_decoder_config) {
DVLOG(1) << "OnVideoConfigChanged for pid=" << pes_pid;
DCHECK_EQ(pes_pid, selected_video_pid_);
DCHECK(video_decoder_config.IsValidConfig());
// Create a new entry in |buffer_queue_chain_| with the updated configs.
BufferQueueWithConfig buffer_queue_with_config(
false,
buffer_queue_chain_.empty()
? AudioDecoderConfig() : buffer_queue_chain_.back().audio_config,
video_decoder_config);
buffer_queue_chain_.push_back(buffer_queue_with_config);
// Replace any non valid config with the 1st valid entry.
// This might happen if there was no available config before.
for (std::list<BufferQueueWithConfig>::iterator it =
buffer_queue_chain_.begin(); it != buffer_queue_chain_.end(); ++it) {
if (it->video_config.IsValidConfig())
break;
it->video_config = video_decoder_config;
}
}
void Mp2tStreamParser::OnAudioConfigChanged(
int pes_pid,
const AudioDecoderConfig& audio_decoder_config) {
DVLOG(1) << "OnAudioConfigChanged for pid=" << pes_pid;
DCHECK_EQ(pes_pid, selected_audio_pid_);
DCHECK(audio_decoder_config.IsValidConfig());
// Create a new entry in |buffer_queue_chain_| with the updated configs.
BufferQueueWithConfig buffer_queue_with_config(
false,
audio_decoder_config,
buffer_queue_chain_.empty()
? VideoDecoderConfig() : buffer_queue_chain_.back().video_config);
buffer_queue_chain_.push_back(buffer_queue_with_config);
// Replace any non valid config with the 1st valid entry.
// This might happen if there was no available config before.
for (std::list<BufferQueueWithConfig>::iterator it =
buffer_queue_chain_.begin(); it != buffer_queue_chain_.end(); ++it) {
if (it->audio_config.IsValidConfig())
break;
it->audio_config = audio_decoder_config;
}
}
bool Mp2tStreamParser::FinishInitializationIfNeeded() {
// Nothing to be done if already initialized. // Nothing to be done if already initialized.
if (is_initialized_) if (is_initialized_)
return true; return true;
// Wait for more data to come to finish initialization. // Wait for more data to come to finish initialization.
if (buffer_queue_chain_.empty()) if (pids_.empty())
return true; return true;
// Wait for more data to come if one of the config is not available. std::vector<scoped_refptr<StreamInfo> > all_stream_info;
BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front(); uint32 num_es(0);
if (selected_audio_pid_ > 0 && for (PidMap::const_iterator iter = pids_.begin(); iter != pids_.end();
!queue_with_config.audio_config.IsValidConfig()) ++iter) {
return true; if (((iter->second->pid_type() == PidState::kPidAudioPes) ||
if (selected_video_pid_ > 0 && (iter->second->pid_type() == PidState::kPidVideoPes))) {
!queue_with_config.video_config.IsValidConfig()) ++num_es;
return true; if (iter->second->config())
all_stream_info.push_back(iter->second->config());
// Pass the config before invoking the initialization callback. }
RCHECK(config_cb_.Run(queue_with_config.audio_config, }
queue_with_config.video_config, if (num_es && (all_stream_info.size() == num_es)) {
TextTrackConfigMap())); // All stream configurations have been received. Initialization can
queue_with_config.is_config_sent = true; // be completed.
init_cb_.Run(all_stream_info);
// For Mpeg2 TS, the duration is not known.
DVLOG(1) << "Mpeg2TS stream parser initialization done"; DVLOG(1) << "Mpeg2TS stream parser initialization done";
init_cb_.Run(true, kInfiniteDuration(), false);
is_initialized_ = true; is_initialized_ = true;
}
return true; return true;
} }
void Mp2tStreamParser::OnEmitAudioBuffer( void MediaParser::OnEmitSample(uint32 pes_pid,
int pes_pid, scoped_refptr<MediaSample>& new_sample) {
scoped_refptr<StreamParserBuffer> stream_parser_buffer) { DCHECK(new_sample);
DCHECK_EQ(pes_pid, selected_audio_pid_);
DVLOG(LOG_LEVEL_ES) DVLOG(LOG_LEVEL_ES)
<< "OnEmitAudioBuffer: " << "OnEmitSample: "
<< " pid="
<< pes_pid
<< " size=" << " size="
<< stream_parser_buffer->data_size() << new_sample->data_size()
<< " dts=" << " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds() << new_sample->dts()
<< " pts=" << " pts="
<< stream_parser_buffer->timestamp().InMilliseconds(); << new_sample->pts();
stream_parser_buffer->set_timestamp(
stream_parser_buffer->timestamp() - time_offset_);
stream_parser_buffer->SetDecodeTimestamp(
stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
// Ignore the incoming buffer if it is not associated with any config. // Add the sample to the appropriate PID sample queue.
if (buffer_queue_chain_.empty()) { PidMap::iterator pid_state = pids_.find(pes_pid);
DVLOG(1) << "Ignoring audio buffer with no corresponding audio config"; if (pid_state == pids_.end()) {
LOG(ERROR) << "PID State for new sample not found (pid = "
<< pes_pid << ").";
return; return;
} }
pid_state->second->sample_queue().push_back(new_sample);
buffer_queue_chain_.back().audio_queue.push_back(stream_parser_buffer);
} }
void Mp2tStreamParser::OnEmitVideoBuffer( bool MediaParser::EmitRemainingSamples() {
int pes_pid, DVLOG(LOG_LEVEL_ES) << "mp2t::MediaParser::EmitRemainingBuffers";
scoped_refptr<StreamParserBuffer> stream_parser_buffer) {
DCHECK_EQ(pes_pid, selected_video_pid_);
DVLOG(LOG_LEVEL_ES)
<< "OnEmitVideoBuffer"
<< " size="
<< stream_parser_buffer->data_size()
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
<< " pts="
<< stream_parser_buffer->timestamp().InMilliseconds()
<< " IsKeyframe="
<< stream_parser_buffer->IsKeyframe();
stream_parser_buffer->set_timestamp(
stream_parser_buffer->timestamp() - time_offset_);
stream_parser_buffer->SetDecodeTimestamp(
stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
// Ignore the incoming buffer if it is not associated with any config.
if (buffer_queue_chain_.empty()) {
DVLOG(1) << "Ignoring video buffer with no corresponding video config:"
<< " keyframe=" << stream_parser_buffer->IsKeyframe()
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
return;
}
// A segment cannot start with a non key frame.
// Ignore the frame if that's the case.
if (first_video_frame_in_segment_ && !stream_parser_buffer->IsKeyframe()) {
DVLOG(1) << "Ignoring non-key frame:"
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
return;
}
first_video_frame_in_segment_ = false;
buffer_queue_chain_.back().video_queue.push_back(stream_parser_buffer);
}
bool Mp2tStreamParser::EmitRemainingBuffers() {
DVLOG(LOG_LEVEL_ES) << "Mp2tStreamParser::EmitRemainingBuffers";
// No buffer should be sent until fully initialized. // No buffer should be sent until fully initialized.
if (!is_initialized_) if (!is_initialized_)
return true; return true;
if (buffer_queue_chain_.empty())
return true;
// Keep track of the last audio and video config sent.
AudioDecoderConfig last_audio_config =
buffer_queue_chain_.back().audio_config;
VideoDecoderConfig last_video_config =
buffer_queue_chain_.back().video_config;
// Buffer emission. // Buffer emission.
while (!buffer_queue_chain_.empty()) { for (PidMap::const_iterator pid_iter = pids_.begin(); pid_iter != pids_.end();
// Start a segment if needed. ++pid_iter) {
if (!segment_started_) { SampleQueue& sample_queue = pid_iter->second->sample_queue();
DVLOG(1) << "Starting a new segment"; for (SampleQueue::iterator sample_iter = sample_queue.begin();
segment_started_ = true; sample_iter != sample_queue.end();
new_segment_cb_.Run(); ++sample_iter) {
} if (!new_sample_cb_.Run(pid_iter->first, *sample_iter)) {
// Error processing sample. Propagate error condition.
// Update the audio and video config if needed.
BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front();
if (!queue_with_config.is_config_sent) {
if (!config_cb_.Run(queue_with_config.audio_config,
queue_with_config.video_config,
TextTrackConfigMap()))
return false;
queue_with_config.is_config_sent = true;
}
// Add buffers.
TextBufferQueueMap empty_text_map;
if (!queue_with_config.audio_queue.empty() ||
!queue_with_config.video_queue.empty()) {
if (!new_buffers_cb_.Run(queue_with_config.audio_queue,
queue_with_config.video_queue,
empty_text_map)) {
return false; return false;
} }
} }
sample_queue.clear();
buffer_queue_chain_.pop_front();
} }
// Push an empty queue with the last audio/video config
// so that buffers with the same config can be added later on.
BufferQueueWithConfig queue_with_config(
true, last_audio_config, last_video_config);
buffer_queue_chain_.push_back(queue_with_config);
return true; return true;
} }
} // namespace mp2t } // namespace mp2t
} // namespace media } // namespace media

View File

@@ -2,135 +2,95 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#ifndef MEDIA_FORMATS_MP2T_MP2T_STREAM_PARSER_H_
-#define MEDIA_FORMATS_MP2T_MP2T_STREAM_PARSER_H_
+#ifndef MEDIA_FORMATS_MP2T_MP2T_MEDIA_PARSER_H_
+#define MEDIA_FORMATS_MP2T_MP2T_MEDIA_PARSER_H_

-#include <list>
+#include <deque>
 #include <map>

 #include "base/memory/ref_counted.h"
 #include "base/memory/scoped_ptr.h"
-#include "media/base/audio_decoder_config.h"
 #include "media/base/byte_queue.h"
-#include "media/base/media_export.h"
-#include "media/base/stream_parser.h"
-#include "media/base/video_decoder_config.h"
+#include "media/base/media_parser.h"
+#include "media/base/stream_info.h"

 namespace media {

-class StreamParserBuffer;
+class MediaSample;

 namespace mp2t {

 class PidState;
+class TsPacket;
+class TsSection;

-class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
+typedef std::deque<scoped_refptr<MediaSample> > SampleQueue;
+
+class MediaParser : public media::MediaParser {
  public:
-  explicit Mp2tStreamParser(bool sbr_in_mimetype);
-  virtual ~Mp2tStreamParser();
+  explicit MediaParser();
+  virtual ~MediaParser();

-  // StreamParser implementation.
+  // media::MediaParser implementation.
   virtual void Init(const InitCB& init_cb,
-                    const NewConfigCB& config_cb,
-                    const NewBuffersCB& new_buffers_cb,
-                    bool ignore_text_tracks,
-                    const NeedKeyCB& need_key_cb,
-                    const NewMediaSegmentCB& new_segment_cb,
-                    const base::Closure& end_of_segment_cb,
-                    const LogCB& log_cb) OVERRIDE;
+                    const NewSampleCB& new_sample_cb,
+                    const NeedKeyCB& need_key_cb) OVERRIDE;
   virtual void Flush() OVERRIDE;
   virtual bool Parse(const uint8* buf, int size) OVERRIDE;

  private:
   typedef std::map<int, PidState*> PidMap;

-  struct BufferQueueWithConfig {
-    BufferQueueWithConfig(bool is_cfg_sent,
-                          const AudioDecoderConfig& audio_cfg,
-                          const VideoDecoderConfig& video_cfg);
-    ~BufferQueueWithConfig();
-
-    bool is_config_sent;
-    AudioDecoderConfig audio_config;
-    StreamParser::BufferQueue audio_queue;
-    VideoDecoderConfig video_config;
-    StreamParser::BufferQueue video_queue;
-  };
-
   // Callback invoked to register a Program Map Table.
   // Note: Does nothing if the PID is already registered.
   void RegisterPmt(int program_number, int pmt_pid);

   // Callback invoked to register a PES pid.
-  // Possible values for |stream_type| are defined in:
-  // ISO-13818.1 / ITU H.222 Table 2.34 "Stream type assignments".
+  // Possible values for |media_type| are defined in:
+  // ISO-13818.1 / ITU H.222 Table 2.34 "Media type assignments".
   // |pes_pid| is part of the Program Map Table refered by |pmt_pid|.
-  void RegisterPes(int pmt_pid, int pes_pid, int stream_type);
-
-  // Since the StreamParser interface allows only one audio & video streams,
-  // an automatic PID filtering should be applied to select the audio & video
-  // streams.
-  void UpdatePidFilter();
+  void RegisterPes(int pmt_pid, int pes_pid, int media_type);

   // Callback invoked each time the audio/video decoder configuration is
   // changed.
-  void OnVideoConfigChanged(int pes_pid,
-                            const VideoDecoderConfig& video_decoder_config);
-  void OnAudioConfigChanged(int pes_pid,
-                            const AudioDecoderConfig& audio_decoder_config);
+  void OnNewStreamInfo(scoped_refptr<StreamInfo>& new_stream_info);
+
+  // Callback invoked by the ES media parser
+  // to emit a new audio/video access unit.
+  void OnEmitSample(uint32 pes_pid, scoped_refptr<MediaSample>& new_sample);

   // Invoke the initialization callback if needed.
   bool FinishInitializationIfNeeded();

-  // Callback invoked by the ES stream parser
-  // to emit a new audio/video access unit.
-  void OnEmitAudioBuffer(
-      int pes_pid,
-      scoped_refptr<StreamParserBuffer> stream_parser_buffer);
-  void OnEmitVideoBuffer(
-      int pes_pid,
-      scoped_refptr<StreamParserBuffer> stream_parser_buffer);
-
-  bool EmitRemainingBuffers();
+  bool EmitRemainingSamples();
+
+  /// Set the value of the "SBR in mime-type" flag which leads to sample rate
+  /// doubling. Default value is false.
+  void set_sbr_in_mime_type(bool sbr_in_mimetype) {
+    sbr_in_mimetype_ = sbr_in_mimetype; }

   // List of callbacks.
   InitCB init_cb_;
-  NewConfigCB config_cb_;
-  NewBuffersCB new_buffers_cb_;
+  NewSampleCB new_sample_cb_;
   NeedKeyCB need_key_cb_;
-  NewMediaSegmentCB new_segment_cb_;
-  base::Closure end_of_segment_cb_;
-  LogCB log_cb_;

-  // True when AAC SBR extension is signalled in the mimetype
-  // (mp4a.40.5 in the codecs parameter).
   bool sbr_in_mimetype_;

-  // Bytes of the TS stream.
+  // Bytes of the TS media.
   ByteQueue ts_byte_queue_;

-  // List of PIDs and their state.
+  // List of PIDs and their states.
   PidMap pids_;

-  // Selected audio and video PIDs.
-  int selected_audio_pid_;
-  int selected_video_pid_;
-
-  // Pending audio & video buffers.
-  std::list<BufferQueueWithConfig> buffer_queue_chain_;
-
   // Whether |init_cb_| has been invoked.
   bool is_initialized_;

-  // Indicate whether a segment was started.
-  bool segment_started_;
-  bool first_video_frame_in_segment_;
-  base::TimeDelta time_offset_;
-
-  DISALLOW_COPY_AND_ASSIGN(Mp2tStreamParser);
+  DISALLOW_COPY_AND_ASSIGN(MediaParser);
 };

 } // namespace mp2t
 } // namespace media

 #endif

View File

@ -9,36 +9,37 @@
#include "base/bind_helpers.h" #include "base/bind_helpers.h"
#include "base/logging.h" #include "base/logging.h"
#include "base/memory/ref_counted.h" #include "base/memory/ref_counted.h"
#include "base/time/time.h" #include "media/base/media_sample.h"
#include "media/base/audio_decoder_config.h" #include "media/base/stream_info.h"
#include "media/base/decoder_buffer.h" #include "media/base/timestamp.h"
#include "media/base/stream_parser_buffer.h" #include "media/base/video_stream_info.h"
#include "media/base/test_data_util.h" #include "media/formats/mp2t/mp2t_common.h"
#include "media/base/text_track_config.h" #include "media/formats/mp2t/mp2t_media_parser.h"
#include "media/base/video_decoder_config.h" #include "media/test/test_data_util.h"
#include "media/formats/mp2t/mp2t_stream_parser.h"
#include "testing/gtest/include/gtest/gtest.h" #include "testing/gtest/include/gtest/gtest.h"
namespace media { namespace media {
namespace mp2t { namespace mp2t {
class Mp2tStreamParserTest : public testing::Test { class Mp2tMediaParserTest : public testing::Test {
public: public:
Mp2tStreamParserTest() Mp2tMediaParserTest()
: audio_frame_count_(0), : audio_frame_count_(0),
video_frame_count_(0), video_frame_count_(0),
video_min_dts_(kNoTimestamp()), video_min_dts_(kNoTimestamp),
video_max_dts_(kNoTimestamp()) { video_max_dts_(kNoTimestamp) {
bool has_sbr = false; parser_.reset(new MediaParser());
parser_.reset(new Mp2tStreamParser(has_sbr));
} }
protected: protected:
scoped_ptr<Mp2tStreamParser> parser_; typedef std::map<int, scoped_refptr<StreamInfo> > StreamMap;
scoped_ptr<MediaParser> parser_;
StreamMap stream_map_;
int audio_frame_count_; int audio_frame_count_;
int video_frame_count_; int video_frame_count_;
base::TimeDelta video_min_dts_; int64 video_min_dts_;
base::TimeDelta video_max_dts_; int64 video_max_dts_;
bool AppendData(const uint8* data, size_t length) { bool AppendData(const uint8* data, size_t length) {
return parser_->Parse(data, length); return parser_->Parse(data, length);
@ -57,56 +58,37 @@ class Mp2tStreamParserTest : public testing::Test {
return true; return true;
} }
void OnInit(bool init_ok, void OnInit(const std::vector<scoped_refptr<StreamInfo> >& stream_infos) {
base::TimeDelta duration, DVLOG(1) << "OnInit: " << stream_infos.size() << " streams.";
bool auto_update_timestamp_offset) { for (std::vector<scoped_refptr<StreamInfo> >::const_iterator iter =
DVLOG(1) << "OnInit: ok=" << init_ok stream_infos.begin(); iter != stream_infos.end(); ++iter) {
<< ", dur=" << duration.InMilliseconds() DVLOG(1) << (*iter)->ToString();
<< ", autoTimestampOffset=" << auto_update_timestamp_offset; stream_map_[(*iter)->track_id()] = *iter;
}
bool OnNewConfig(const AudioDecoderConfig& ac,
const VideoDecoderConfig& vc,
const StreamParser::TextTrackConfigMap& tc) {
DVLOG(1) << "OnNewConfig: audio=" << ac.IsValidConfig()
<< ", video=" << vc.IsValidConfig();
return true;
}
void DumpBuffers(const std::string& label,
const StreamParser::BufferQueue& buffers) {
DVLOG(2) << "DumpBuffers: " << label << " size " << buffers.size();
for (StreamParser::BufferQueue::const_iterator buf = buffers.begin();
buf != buffers.end(); buf++) {
DVLOG(3) << " n=" << buf - buffers.begin()
<< ", size=" << (*buf)->data_size()
<< ", dur=" << (*buf)->duration().InMilliseconds();
} }
} }
bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers, bool OnNewSample(uint32 track_id, const scoped_refptr<MediaSample>& sample) {
const StreamParser::BufferQueue& video_buffers, std::string stream_type;
const StreamParser::TextBufferQueueMap& text_map) { StreamMap::const_iterator stream = stream_map_.find(track_id);
DumpBuffers("audio_buffers", audio_buffers); if (stream != stream_map_.end()) {
DumpBuffers("video_buffers", video_buffers); if (stream->second->stream_type() == kStreamAudio) {
audio_frame_count_ += audio_buffers.size(); ++audio_frame_count_;
video_frame_count_ += video_buffers.size(); stream_type = "audio";
} else if (stream->second->stream_type() == kStreamVideo) {
// TODO(wolenetz/acolwell): Add text track support to more MSE parsers. See ++video_frame_count_;
// http://crbug.com/336926. stream_type = "video";
if (!text_map.empty()) if (video_min_dts_ == kNoTimestamp)
video_min_dts_ = sample->dts();
// Verify timestamps are increasing.
if (video_max_dts_ == kNoTimestamp)
video_max_dts_ = sample->dts();
else if (video_max_dts_ >= sample->dts()) {
LOG(ERROR) << "Video DTS not strictly increasing.";
return false; return false;
}
if (video_min_dts_ == kNoTimestamp() && !video_buffers.empty()) video_max_dts_ = sample->dts();
video_min_dts_ = video_buffers.front()->GetDecodeTimestamp(); } else {
if (!video_buffers.empty()) { LOG(ERROR) << "Missing StreamInfo for track ID " << track_id;
video_max_dts_ = video_buffers.back()->GetDecodeTimestamp();
// Verify monotonicity.
StreamParser::BufferQueue::const_iterator it1 = video_buffers.begin();
StreamParser::BufferQueue::const_iterator it2 = ++it1;
for ( ; it2 != video_buffers.end(); ++it1, ++it2) {
if ((*it2)->GetDecodeTimestamp() < (*it1)->GetDecodeTimestamp())
return false; return false;
} }
} }
@ -114,49 +96,34 @@ class Mp2tStreamParserTest : public testing::Test {
return true; return true;
} }
void OnKeyNeeded(const std::string& type, void OnKeyNeeded(MediaContainerName container_name,
const std::vector<uint8>& init_data) { scoped_ptr<uint8[]> init_data,
DVLOG(1) << "OnKeyNeeded: " << init_data.size(); int init_data_size) {
} DVLOG(1) << "OnKeyNeeded: " << init_data_size;
void OnNewSegment() {
DVLOG(1) << "OnNewSegment";
}
void OnEndOfSegment() {
DVLOG(1) << "OnEndOfSegment()";
} }
void InitializeParser() { void InitializeParser() {
parser_->Init( parser_->Init(
base::Bind(&Mp2tStreamParserTest::OnInit, base::Bind(&Mp2tMediaParserTest::OnInit,
base::Unretained(this)), base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewConfig, base::Bind(&Mp2tMediaParserTest::OnNewSample,
base::Unretained(this)), base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewBuffers, base::Bind(&Mp2tMediaParserTest::OnKeyNeeded,
base::Unretained(this)), base::Unretained(this)));
true,
base::Bind(&Mp2tStreamParserTest::OnKeyNeeded,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewSegment,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnEndOfSegment,
base::Unretained(this)),
LogCB());
} }
bool ParseMpeg2TsFile(const std::string& filename, int append_bytes) { bool ParseMpeg2TsFile(const std::string& filename, int append_bytes) {
InitializeParser(); InitializeParser();
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename); std::vector<uint8> buffer = ReadTestDataFile(filename);
EXPECT_TRUE(AppendDataInPieces(buffer->data(), EXPECT_TRUE(AppendDataInPieces(buffer.data(),
buffer->data_size(), buffer.size(),
append_bytes)); append_bytes));
return true; return true;
} }
}; };
TEST_F(Mp2tStreamParserTest, UnalignedAppend17) { TEST_F(Mp2tMediaParserTest, UnalignedAppend17) {
// Test small, non-segment-aligned appends. // Test small, non-segment-aligned appends.
ParseMpeg2TsFile("bear-1280x720.ts", 17); ParseMpeg2TsFile("bear-1280x720.ts", 17);
EXPECT_EQ(video_frame_count_, 81); EXPECT_EQ(video_frame_count_, 81);
@ -164,7 +131,7 @@ TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
EXPECT_EQ(video_frame_count_, 82); EXPECT_EQ(video_frame_count_, 82);
} }
TEST_F(Mp2tStreamParserTest, UnalignedAppend512) { TEST_F(Mp2tMediaParserTest, UnalignedAppend512) {
// Test small, non-segment-aligned appends. // Test small, non-segment-aligned appends.
ParseMpeg2TsFile("bear-1280x720.ts", 512); ParseMpeg2TsFile("bear-1280x720.ts", 512);
EXPECT_EQ(video_frame_count_, 81); EXPECT_EQ(video_frame_count_, 81);
@ -172,15 +139,16 @@ TEST_F(Mp2tStreamParserTest, UnalignedAppend512) {
EXPECT_EQ(video_frame_count_, 82); EXPECT_EQ(video_frame_count_, 82);
} }
TEST_F(Mp2tStreamParserTest, TimestampWrapAround) { TEST_F(Mp2tMediaParserTest, TimestampWrapAround) {
// "bear-1280x720_ptswraparound.ts" has been transcoded // "bear-1280x720_ptswraparound.ts" has been transcoded
// from bear-1280x720.mp4 by applying a time offset of 95442s // from bear-1280x720.mp4 by applying a time offset of 95442s
// (close to 2^33 / 90000) which results in timestamps wrap around // (close to 2^33 / 90000) which results in timestamps wrap around
// in the Mpeg2 TS stream. // in the Mpeg2 TS stream.
ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512); ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512);
EXPECT_EQ(video_frame_count_, 81); EXPECT_EQ(video_frame_count_, 81);
EXPECT_GE(video_min_dts_, base::TimeDelta::FromSeconds(95443 - 10)); EXPECT_GE(video_min_dts_, (95443 - 1) * kMpeg2Timescale);
EXPECT_LE(video_max_dts_, base::TimeDelta::FromSeconds(95443 + 10)); EXPECT_LE(video_max_dts_,
static_cast<int64>((95443 + 4)) * kMpeg2Timescale);
} }
} // namespace mp2t } // namespace mp2t

View File

@@ -7,7 +7,7 @@
 #include "base/logging.h"
 #include "base/strings/string_number_conversions.h"
 #include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp.h"
 #include "media/formats/mp2t/es_parser.h"
 #include "media/formats/mp2t/mp2t_common.h"
@@ -266,15 +266,15 @@ bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
   }

   // Convert and unroll the timestamps.
-  base::TimeDelta media_pts(kNoTimestamp());
-  base::TimeDelta media_dts(kNoTimestamp());
+  int64 media_pts(kNoTimestamp);
+  int64 media_dts(kNoTimestamp);
   if (is_pts_valid) {
     int64 pts = ConvertTimestampSectionToTimestamp(pts_section);
     if (previous_pts_valid_)
       pts = UnrollTimestamp(previous_pts_, pts);
     previous_pts_ = pts;
     previous_pts_valid_ = true;
-    media_pts = base::TimeDelta::FromMicroseconds((1000 * pts) / 90);
+    media_pts = pts;
   }
   if (is_dts_valid) {
     int64 dts = ConvertTimestampSectionToTimestamp(dts_section);
@@ -282,7 +282,7 @@ bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
       dts = UnrollTimestamp(previous_dts_, dts);
     previous_dts_ = dts;
     previous_dts_valid_ = true;
-    media_dts = base::TimeDelta::FromMicroseconds((1000 * dts) / 90);
+    media_dts = dts;
   }

   // Discard the rest of the PES packet header.
@@ -296,8 +296,8 @@ bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
   DVLOG(LOG_LEVEL_PES)
       << "Emit a reassembled PES:"
       << " size=" << es_size
-      << " pts=" << media_pts.InMilliseconds()
-      << " dts=" << media_dts.InMilliseconds()
+      << " pts=" << media_pts
+      << " dts=" << media_dts
       << " data_alignment_indicator=" << data_alignment_indicator;
   return es_parser_->Parse(&raw_pes[es_offset], es_size, media_pts, media_dts);
 }
@@ -309,4 +309,3 @@ void TsSectionPes::ResetPesState() {
 } // namespace mp2t
 } // namespace media
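With this change PES timestamps stay in 90 kHz MPEG-2 ticks (milliseconds = ticks / 90) instead of being converted to base::TimeDelta. The 33-bit unrolling referenced above (UnrollTimestamp) is not shown in the hunk; a common way to implement it, sketched here with an illustrative helper name and not claimed to be the file's actual code, is to pick the value congruent to the raw timestamp modulo 2^33 that lies closest to the previous unrolled value:

#include <cstdint>
#include <cstdlib>

// Hypothetical sketch: extend a raw 33-bit PTS/DTS (90 kHz ticks) so that
// successive values stay monotonic across wrap-around.
int64_t UnrollTimestamp33(int64_t previous_unrolled, int64_t raw_timestamp) {
  const int64_t kWrap = 1LL << 33;  // MPEG-2 PTS/DTS are 33 bits wide.
  // Candidate in the same 2^33 "epoch" as the previous unrolled timestamp.
  int64_t candidate = (previous_unrolled / kWrap) * kWrap + raw_timestamp;
  int64_t best = candidate;
  // Also consider the neighbouring epochs and keep the closest candidate.
  for (int64_t delta = -kWrap; delta <= kWrap; delta += kWrap) {
    int64_t c = candidate + delta;
    if (std::llabs(c - previous_unrolled) < std::llabs(best - previous_unrolled))
      best = c;
  }
  return best;
}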

View File

@@ -37,4 +37,3 @@ class TsSectionPmt : public TsSectionPsi {
 } // namespace media

 #endif

View File

@@ -129,4 +129,3 @@ void TsSectionPsi::ResetPsiState() {
 } // namespace mp2t
 } // namespace media

View File

@@ -10,15 +10,15 @@ namespace media {

 // The following conversion table is extracted from ISO 14496 Part 3 -
 // Table 1.16 - Sampling Frequency Index.
-const int kADTSFrequencyTable[] = {96000, 88200, 64000, 48000, 44100,
-                                   32000, 24000, 22050, 16000, 12000,
-                                   11025, 8000, 7350};
-const size_t kADTSFrequencyTableSize = arraysize(kADTSFrequencyTable);
+const int kAdtsFrequencyTable[] = {96000, 88200, 64000, 48000, 44100,
+                                   32000, 24000, 22050, 16000, 12000,
+                                   11025, 8000, 7350};
+const size_t kAdtsFrequencyTableSize = arraysize(kAdtsFrequencyTable);

 // The following conversion table is extracted from ISO 14496 Part 3 -
 // Table 1.17 - Channel Configuration.
-const int kADTSNumChannelsTable[] = {
-    0, 1, 2, 2, 4, 5, 6, 8 };
-const size_t kADTSNumChannelsTableSize = arraysize(kADTSNumChannelsTable);
+const int kAdtsNumChannelsTable[] = {
+    0, 1, 2, 2, 4, 5, 6, 8 };
+const size_t kAdtsNumChannelsTableSize = arraysize(kAdtsNumChannelsTable);

 } // namespace media

View File

@@ -24,7 +24,11 @@ vorbis-packet-1 - timestamp: 0ms, duration: 0ms
 vorbis-packet-2 - timestamp: 0ms, duration: 0ms
 vorbis-packet-3 - timestamp: 2902ms, duration: 0ms

-// Encrypted Files
+// Transport streams.
+bear-1280x720.ts - AVC + AAC encode, multiplexed into an MPEG2-TS container.
+bear-1280x720_ptswraparound.ts - Same as bear-1280x720.ts, with a timestamp wrap-around in the middle.
+
+// Encrypted Files.
 bear-1280x720-a_frag-cenc.mp4 - A fragmented MP4 version of the audio track of bear-1280x720.mp4 encrypted (ISO CENC) using key ID [1] and key [2].
 bear-1280x720-a_frag-cenc_clear-all.mp4 - Same as bear-1280x720-a_frag-cenc.mp4 but no fragments are encrypted.
 bear-1280x720-v_frag-cenc.mp4 - A fragmented MP4 version of the video track of bear-1280x720.mp4 encrypted (ISO CENC) using key ID [1] and key [2].

Binary file not shown.

Binary file not shown.