Modified mp2t::MediaParser (formerly Mp2tStreamParser) to implement the packaging SDK's MediaParser interface.

Added transport stream test data.

Change-Id: I2f20d0a67eb5a1157ceed08af67368895016170c
Thomas Inskip 2014-04-10 12:57:10 -07:00
parent 20e66b2109
commit c5f1e5eb7a
17 changed files with 275 additions and 528 deletions
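
For orientation, here is a minimal sketch of how a caller might drive the reworked parser. The Init/Parse/Flush entry points and the callback signatures are taken from the diff and unit test below; the wrapper class name (TsParserClient), the exact include set, and the empty callback bodies are illustrative assumptions, not part of this commit.

// Minimal usage sketch, assuming the packager's base:: utilities and the
// headers touched in this change. Not part of the commit itself.
#include <vector>

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/media_sample.h"
#include "media/base/stream_info.h"
#include "media/formats/mp2t/mp2t_media_parser.h"

namespace media {

class TsParserClient {
 public:
  TsParserClient() : initialized_(false) {}

  // Feed raw transport-stream bytes; chunk boundaries need not align with
  // 188-byte TS packets.
  bool ParseChunk(const uint8* data, int size) {
    if (!initialized_) {
      parser_.Init(
          base::Bind(&TsParserClient::OnInit, base::Unretained(this)),
          base::Bind(&TsParserClient::OnNewSample, base::Unretained(this)),
          base::Bind(&TsParserClient::OnKeyNeeded, base::Unretained(this)));
      initialized_ = true;
    }
    return parser_.Parse(data, size);
  }

  // Emit any samples still queued per PID.
  void Finish() { parser_.Flush(); }

 private:
  // Invoked once every elementary stream in the PMT has a StreamInfo.
  void OnInit(const std::vector<scoped_refptr<StreamInfo> >& streams) {}

  // |track_id| is the TS PID the sample belongs to; returning false
  // propagates an error out of Parse()/Flush().
  bool OnNewSample(uint32 track_id,
                   const scoped_refptr<MediaSample>& sample) {
    return true;
  }

  // MediaContainerName is assumed to come from the packager's base headers.
  void OnKeyNeeded(MediaContainerName container,
                   scoped_ptr<uint8[]> init_data,
                   int init_data_size) {}

  mp2t::MediaParser parser_;
  bool initialized_;
};

}  // namespace media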


@ -12,14 +12,17 @@
namespace media {
class MediaSample;
class StreamInfo;
namespace mp2t {
class EsParser {
public:
typedef base::Callback<void(scoped_refptr<MediaSample>&)> EmitSampleCB;
typedef base::Callback<void(scoped_refptr<StreamInfo>&)> NewStreamInfoCB;
typedef base::Callback<void(uint32, scoped_refptr<MediaSample>&)> EmitSampleCB;
EsParser(uint32 track_id) : track_id_(track_id) {}
EsParser(uint32 pid)
: pid_(pid) {}
virtual ~EsParser() {}
// ES parsing.
@ -32,10 +35,10 @@ class EsParser {
// Reset the state of the ES parser.
virtual void Reset() = 0;
uint32 track_id() { return track_id_; }
uint32 pid() { return pid_; }
private:
uint32 track_id_;
uint32 pid_;
};
} // namespace mp2t


@ -98,12 +98,12 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
namespace mp2t {
EsParserAdts::EsParserAdts(
uint32 track_id,
const NewAudioConfigCB& new_audio_config_cb,
uint32 pid,
const NewStreamInfoCB& new_stream_info_cb,
const EmitSampleCB& emit_sample_cb,
bool sbr_in_mimetype)
: EsParser(track_id),
new_audio_config_cb_(new_audio_config_cb),
: EsParser(pid),
new_stream_info_cb_(new_stream_info_cb),
emit_sample_cb_(emit_sample_cb),
sbr_in_mimetype_(sbr_in_mimetype) {
}
@ -170,7 +170,7 @@ bool EsParserAdts::Parse(const uint8* buf, int size, int64 pts, int64 dts) {
sample->set_pts(current_pts);
sample->set_dts(current_pts);
sample->set_duration(frame_duration);
emit_sample_cb_.Run(sample);
emit_sample_cb_.Run(pid(), sample);
// Update the PTS of the next frame.
audio_timestamp_helper_->AddFrames(kSamplesPerAACFrame);
@ -229,9 +229,9 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
? std::min(2 * samples_per_second, 48000)
: samples_per_second;
last_audio_decoder_config_ = scoped_refptr<AudioStreamInfo>(
last_audio_decoder_config_ = scoped_refptr<StreamInfo>(
new AudioStreamInfo(
track_id(),
pid(),
kMpeg2Timescale,
kInfiniteDuration,
kCodecAAC,
@ -260,7 +260,7 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
}
// Audio config notification.
new_audio_config_cb_.Run(last_audio_decoder_config_);
new_stream_info_cb_.Run(last_audio_decoder_config_);
return true;
}


@ -25,11 +25,8 @@ namespace mp2t {
class EsParserAdts : public EsParser {
public:
typedef base::Callback<void(
scoped_refptr<AudioStreamInfo>&)> NewAudioConfigCB;
EsParserAdts(uint32 track_id,
const NewAudioConfigCB& new_audio_config_cb,
EsParserAdts(uint32 pid,
const NewStreamInfoCB& new_stream_info_cb,
const EmitSampleCB& emit_sample_cb,
bool sbr_in_mimetype);
virtual ~EsParserAdts();
@ -57,7 +54,7 @@ class EsParserAdts : public EsParser {
// Callbacks:
// - to signal a new audio configuration,
// - to send ES buffers.
NewAudioConfigCB new_audio_config_cb_;
NewStreamInfoCB new_stream_info_cb_;
EmitSampleCB emit_sample_cb_;
// True when AAC SBR extension is signalled in the mimetype
@ -73,7 +70,7 @@ class EsParserAdts : public EsParser {
// Interpolated PTS for frames that don't have one.
scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
scoped_refptr<AudioStreamInfo> last_audio_decoder_config_;
scoped_refptr<StreamInfo> last_audio_decoder_config_;
DISALLOW_COPY_AND_ASSIGN(EsParserAdts);
};


@ -35,11 +35,11 @@ const uint8 kCommonNaluLengthSize = 4;
} // anonymous namespace
EsParserH264::EsParserH264(
uint32 track_id,
const NewVideoConfigCB& new_video_config_cb,
uint32 pid,
const NewStreamInfoCB& new_stream_info_cb,
const EmitSampleCB& emit_sample_cb)
: EsParser(track_id),
new_video_config_cb_(new_video_config_cb),
: EsParser(pid),
new_stream_info_cb_(new_stream_info_cb),
emit_sample_cb_(emit_sample_cb),
es_queue_(new media::OffsetByteQueue()),
h264_parser_(new H264Parser()),
@ -95,7 +95,7 @@ void EsParserH264::Reset() {
current_access_unit_pos_ = 0;
next_access_unit_pos_ = 0;
timing_desc_list_.clear();
last_video_decoder_config_ = scoped_refptr<VideoStreamInfo>();
last_video_decoder_config_ = scoped_refptr<StreamInfo>();
}
bool EsParserH264::FindAUD(int64* stream_pos) {
@ -282,7 +282,7 @@ bool EsParserH264::EmitFrame(int64 access_unit_pos, int access_unit_size,
is_key_frame);
media_sample->set_dts(current_timing_desc.dts);
media_sample->set_pts(current_timing_desc.pts);
emit_sample_cb_.Run(media_sample);
emit_sample_cb_.Run(pid(), media_sample);
return true;
}
@ -299,9 +299,9 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
uint16 width = (sps->pic_width_in_mbs_minus1 + 1) * 16;
uint16 height = (sps->pic_height_in_map_units_minus1 + 1) * 16;
last_video_decoder_config_ = scoped_refptr<VideoStreamInfo>(
last_video_decoder_config_ = scoped_refptr<StreamInfo>(
new VideoStreamInfo(
track_id(),
pid(),
kMpeg2Timescale,
kInfiniteDuration,
kCodecH264,
@ -323,7 +323,7 @@ bool EsParserH264::UpdateVideoDecoderConfig(const H264SPS* sps) {
<< " height=" << sps->sar_height;
// Video config notification.
new_video_config_cb_.Run(last_video_decoder_config_);
new_stream_info_cb_.Run(last_video_decoder_config_);
return true;
}


@ -34,11 +34,8 @@ namespace mp2t {
//
class EsParserH264 : public EsParser {
public:
typedef base::Callback<void(
scoped_refptr<VideoStreamInfo>&)> NewVideoConfigCB;
EsParserH264(uint32 track_id,
const NewVideoConfigCB& new_video_config_cb,
EsParserH264(uint32 pid,
const NewStreamInfoCB& new_stream_info_cb,
const EmitSampleCB& emit_sample_cb);
virtual ~EsParserH264();
@ -74,7 +71,7 @@ class EsParserH264 : public EsParser {
bool UpdateVideoDecoderConfig(const filters::H264SPS* sps);
// Callbacks to pass the stream configuration and the frames.
NewVideoConfigCB new_video_config_cb_;
NewStreamInfoCB new_stream_info_cb_;
EmitSampleCB emit_sample_cb_;
// Bytes of the ES stream that have not been emitted yet.
@ -89,7 +86,7 @@ class EsParserH264 : public EsParser {
int64 next_access_unit_pos_;
// Last video decoder config.
scoped_refptr<VideoStreamInfo> last_video_decoder_config_;
scoped_refptr<StreamInfo> last_video_decoder_config_;
};
} // namespace mp2t


@ -129,11 +129,11 @@ class EsParserH264Test : public testing::Test {
void LoadStream(const char* filename);
void ProcessPesPackets(const std::vector<Packet>& pes_packets);
void EmitSample(scoped_refptr<MediaSample>& sample) {
void EmitSample(uint32 pid, scoped_refptr<MediaSample>& sample) {
sample_count_++;
}
void NewVideoConfig(scoped_refptr<VideoStreamInfo>& config) {
void NewVideoConfig(scoped_refptr<StreamInfo>& config) {
}
size_t sample_count() const { return sample_count_; }


@ -24,6 +24,18 @@
'es_parser_adts.h',
'es_parser_h264.cc',
'es_parser_h264.h',
'mp2t_media_parser.cc',
'mp2t_media_parser.h',
'ts_packet.cc',
'ts_packet.h',
'ts_section_pat.cc',
'ts_section_pat.h',
'ts_section_pes.cc',
'ts_section_pes.h',
'ts_section_pmt.cc',
'ts_section_pmt.h',
'ts_section_psi.cc',
'ts_section_psi.h',
],
'dependencies': [
'../../base/media_base.gyp:base',
@ -34,12 +46,14 @@
'type': '<(gtest_target_type)',
'sources': [
'es_parser_h264_unittest.cc',
'mp2t_media_parser_unittest.cc',
],
'dependencies': [
'../../../testing/gtest.gyp:gtest',
'../../../testing/gmock.gyp:gmock',
'../../filters/filters.gyp:filters',
'../../test/media_test.gyp:media_test_support',
'../mpeg/mpeg.gyp:mpeg',
'mp2t',
]
},


@ -2,16 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "media/formats/mp2t/mp2t_stream_parser.h"
#include "media/formats/mp2t/mp2t_media_parser.h"
#include "base/bind.h"
#include "base/memory/scoped_ptr.h"
#include "base/stl_util.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/buffers.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/base/media_sample.h"
#include "media/base/stream_info.h"
#include "media/formats/mp2t/es_parser.h"
#include "media/formats/mp2t/es_parser_adts.h"
#include "media/formats/mp2t/es_parser_h264.h"
@ -41,7 +38,7 @@ class PidState {
kPidVideoPes,
};
PidState(int pid, PidType pid_tyoe,
PidState(int pid, PidType pid_type,
scoped_ptr<TsSection> section_parser);
// Extract the content of the TS packet and parse it.
@ -61,6 +58,11 @@ class PidState {
PidType pid_type() const { return pid_type_; }
scoped_refptr<StreamInfo>& config() { return config_; }
void set_config(scoped_refptr<StreamInfo>& config) { config_ = config; }
SampleQueue& sample_queue() { return sample_queue_; }
private:
void ResetState();
@ -69,8 +71,9 @@ class PidState {
scoped_ptr<TsSection> section_parser_;
bool enable_;
int continuity_counter_;
scoped_refptr<StreamInfo> config_;
SampleQueue sample_queue_;
};
PidState::PidState(int pid, PidType pid_type,
@ -95,6 +98,7 @@ bool PidState::PushTsPacket(const TsPacket& ts_packet) {
if (continuity_counter_ >= 0 &&
ts_packet.continuity_counter() != expected_continuity_counter) {
DVLOG(1) << "TS discontinuity detected for pid: " << pid_;
// TODO(tinskip): Handle discontinuity better.
return false;
}
@ -104,7 +108,7 @@ bool PidState::PushTsPacket(const TsPacket& ts_packet) {
ts_packet.payload_size());
// At the minimum, when parsing failed, auto reset the section parser.
// Components that use the StreamParser can take further action if needed.
// Components that use the MediaParser can take further action if needed.
if (!status) {
DVLOG(1) << "Parsing failed for pid = " << pid_;
ResetState();
@ -139,59 +143,32 @@ void PidState::ResetState() {
continuity_counter_ = -1;
}
Mp2tStreamParser::BufferQueueWithConfig::BufferQueueWithConfig(
bool is_cfg_sent,
const AudioDecoderConfig& audio_cfg,
const VideoDecoderConfig& video_cfg)
: is_config_sent(is_cfg_sent),
audio_config(audio_cfg),
video_config(video_cfg) {
MediaParser::MediaParser()
: sbr_in_mimetype_(false),
is_initialized_(false) {
}
Mp2tStreamParser::BufferQueueWithConfig::~BufferQueueWithConfig() {
}
Mp2tStreamParser::Mp2tStreamParser(bool sbr_in_mimetype)
: sbr_in_mimetype_(sbr_in_mimetype),
selected_audio_pid_(-1),
selected_video_pid_(-1),
is_initialized_(false),
segment_started_(false),
first_video_frame_in_segment_(true) {
}
Mp2tStreamParser::~Mp2tStreamParser() {
MediaParser::~MediaParser() {
STLDeleteValues(&pids_);
}
void Mp2tStreamParser::Init(
void MediaParser::Init(
const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
bool /* ignore_text_tracks */ ,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) {
const NewSampleCB& new_sample_cb,
const NeedKeyCB& need_key_cb) {
DCHECK(!is_initialized_);
DCHECK(init_cb_.is_null());
DCHECK(!init_cb.is_null());
DCHECK(!config_cb.is_null());
DCHECK(!new_buffers_cb.is_null());
DCHECK(!new_sample_cb.is_null());
DCHECK(!need_key_cb.is_null());
DCHECK(!end_of_segment_cb.is_null());
init_cb_ = init_cb;
config_cb_ = config_cb;
new_buffers_cb_ = new_buffers_cb;
new_sample_cb_ = new_sample_cb;
need_key_cb_ = need_key_cb;
new_segment_cb_ = new_segment_cb;
end_of_segment_cb_ = end_of_segment_cb;
log_cb_ = log_cb;
}
void Mp2tStreamParser::Flush() {
DVLOG(1) << "Mp2tStreamParser::Flush";
void MediaParser::Flush() {
DVLOG(1) << "MediaParser::Flush";
// Flush the buffers and reset the pids.
for (std::map<int, PidState*>::iterator it = pids_.begin();
@ -199,29 +176,17 @@ void Mp2tStreamParser::Flush() {
DVLOG(1) << "Flushing PID: " << it->first;
PidState* pid_state = it->second;
pid_state->Flush();
delete pid_state;
}
pids_.clear();
EmitRemainingBuffers();
buffer_queue_chain_.clear();
// End of the segment.
// Note: does not need to invoke |end_of_segment_cb_| since flushing the
// stream parser already involves the end of the current segment.
segment_started_ = false;
first_video_frame_in_segment_ = true;
EmitRemainingSamples();
STLDeleteValues(&pids_);
// Remove any bytes left in the TS buffer.
// (i.e. any partial TS packet => less than 188 bytes).
ts_byte_queue_.Reset();
// Reset the selected PIDs.
selected_audio_pid_ = -1;
selected_video_pid_ = -1;
}
bool Mp2tStreamParser::Parse(const uint8* buf, int size) {
DVLOG(1) << "Mp2tStreamParser::Parse size=" << size;
bool MediaParser::Parse(const uint8* buf, int size) {
DVLOG(1) << "MediaParser::Parse size=" << size;
// Add the data to the parser state.
ts_byte_queue_.Push(buf, size);
@ -260,7 +225,7 @@ bool Mp2tStreamParser::Parse(const uint8* buf, int size) {
// Create the PAT state here if needed.
scoped_ptr<TsSection> pat_section_parser(
new TsSectionPat(
base::Bind(&Mp2tStreamParser::RegisterPmt,
base::Bind(&MediaParser::RegisterPmt,
base::Unretained(this))));
scoped_ptr<PidState> pat_pid_state(
new PidState(ts_packet->pid(), PidState::kPidPat,
@ -282,13 +247,11 @@ bool Mp2tStreamParser::Parse(const uint8* buf, int size) {
ts_byte_queue_.Pop(TsPacket::kPacketSize);
}
RCHECK(FinishInitializationIfNeeded());
// Emit the A/V buffers that kept accumulating during TS parsing.
return EmitRemainingBuffers();
return EmitRemainingSamples();
}
void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) {
void MediaParser::RegisterPmt(int program_number, int pmt_pid) {
DVLOG(1) << "RegisterPmt:"
<< " program_number=" << program_number
<< " pmt_pid=" << pmt_pid;
@ -308,7 +271,7 @@ void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) {
DVLOG(1) << "Create a new PMT parser";
scoped_ptr<TsSection> pmt_section_parser(
new TsSectionPmt(
base::Bind(&Mp2tStreamParser::RegisterPes,
base::Bind(&MediaParser::RegisterPes,
base::Unretained(this), pmt_pid)));
scoped_ptr<PidState> pmt_pid_state(
new PidState(pmt_pid, PidState::kPidPmt, pmt_section_parser.Pass()));
@ -316,7 +279,7 @@ void Mp2tStreamParser::RegisterPmt(int program_number, int pmt_pid) {
pids_.insert(std::pair<int, PidState*>(pmt_pid, pmt_pid_state.release()));
}
void Mp2tStreamParser::RegisterPes(int pmt_pid,
void MediaParser::RegisterPes(int pmt_pid,
int pes_pid,
int stream_type) {
// TODO(damienv): check there is no mismatch if the entry already exists.
@ -333,21 +296,19 @@ void Mp2tStreamParser::RegisterPes(int pmt_pid,
if (stream_type == kStreamTypeAVC) {
es_parser.reset(
new EsParserH264(
base::Bind(&Mp2tStreamParser::OnVideoConfigChanged,
base::Unretained(this),
pes_pid),
base::Bind(&Mp2tStreamParser::OnEmitVideoBuffer,
base::Unretained(this),
pes_pid)));
pes_pid,
base::Bind(&MediaParser::OnNewStreamInfo,
base::Unretained(this)),
base::Bind(&MediaParser::OnEmitSample,
base::Unretained(this))));
} else if (stream_type == kStreamTypeAAC) {
es_parser.reset(
new EsParserAdts(
base::Bind(&Mp2tStreamParser::OnAudioConfigChanged,
base::Unretained(this),
pes_pid),
base::Bind(&Mp2tStreamParser::OnEmitAudioBuffer,
base::Unretained(this),
pes_pid),
pes_pid,
base::Bind(&MediaParser::OnNewStreamInfo,
base::Unretained(this)),
base::Bind(&MediaParser::OnEmitSample,
base::Unretained(this)),
sbr_in_mimetype_));
is_audio = true;
} else {
@ -362,261 +323,107 @@ void Mp2tStreamParser::RegisterPes(int pmt_pid,
is_audio ? PidState::kPidAudioPes : PidState::kPidVideoPes;
scoped_ptr<PidState> pes_pid_state(
new PidState(pes_pid, pid_type, pes_section_parser.Pass()));
pes_pid_state->Enable();
pids_.insert(std::pair<int, PidState*>(pes_pid, pes_pid_state.release()));
// A new PES pid has been added, the PID filter might change.
UpdatePidFilter();
}
void Mp2tStreamParser::UpdatePidFilter() {
// Applies the HLS rule to select the default audio/video PIDs:
// select the audio/video streams with the lowest PID.
// TODO(damienv): this can be changed when the StreamParser interface
// supports multiple audio/video streams.
PidMap::iterator lowest_audio_pid = pids_.end();
PidMap::iterator lowest_video_pid = pids_.end();
for (PidMap::iterator it = pids_.begin(); it != pids_.end(); ++it) {
int pid = it->first;
PidState* pid_state = it->second;
if (pid_state->pid_type() == PidState::kPidAudioPes &&
(lowest_audio_pid == pids_.end() || pid < lowest_audio_pid->first))
lowest_audio_pid = it;
if (pid_state->pid_type() == PidState::kPidVideoPes &&
(lowest_video_pid == pids_.end() || pid < lowest_video_pid->first))
lowest_video_pid = it;
void MediaParser::OnNewStreamInfo(
scoped_refptr<StreamInfo>& new_stream_info) {
DCHECK(new_stream_info);
DVLOG(1) << "OnVideoConfigChanged for pid=" << new_stream_info->track_id();
PidMap::iterator pid_state = pids_.find(new_stream_info->track_id());
if (pid_state == pids_.end()) {
LOG(ERROR) << "PID State for new stream not found (pid = "
<< new_stream_info->track_id() << ").";
return;
}
// Enable both the lowest audio and video PIDs.
if (lowest_audio_pid != pids_.end()) {
DVLOG(1) << "Enable audio pid: " << lowest_audio_pid->first;
lowest_audio_pid->second->Enable();
selected_audio_pid_ = lowest_audio_pid->first;
}
if (lowest_video_pid != pids_.end()) {
DVLOG(1) << "Enable video pid: " << lowest_video_pid->first;
lowest_video_pid->second->Enable();
selected_video_pid_ = lowest_video_pid->first;
// Set the stream configuration information for the PID.
pid_state->second->set_config(new_stream_info);
// Finish initialization if all streams have configs.
FinishInitializationIfNeeded();
}
// Disable all the other audio and video PIDs.
for (PidMap::iterator it = pids_.begin(); it != pids_.end(); ++it) {
PidState* pid_state = it->second;
if (it != lowest_audio_pid && it != lowest_video_pid &&
(pid_state->pid_type() == PidState::kPidAudioPes ||
pid_state->pid_type() == PidState::kPidVideoPes))
pid_state->Disable();
}
}
void Mp2tStreamParser::OnVideoConfigChanged(
int pes_pid,
const VideoDecoderConfig& video_decoder_config) {
DVLOG(1) << "OnVideoConfigChanged for pid=" << pes_pid;
DCHECK_EQ(pes_pid, selected_video_pid_);
DCHECK(video_decoder_config.IsValidConfig());
// Create a new entry in |buffer_queue_chain_| with the updated configs.
BufferQueueWithConfig buffer_queue_with_config(
false,
buffer_queue_chain_.empty()
? AudioDecoderConfig() : buffer_queue_chain_.back().audio_config,
video_decoder_config);
buffer_queue_chain_.push_back(buffer_queue_with_config);
// Replace any non valid config with the 1st valid entry.
// This might happen if there was no available config before.
for (std::list<BufferQueueWithConfig>::iterator it =
buffer_queue_chain_.begin(); it != buffer_queue_chain_.end(); ++it) {
if (it->video_config.IsValidConfig())
break;
it->video_config = video_decoder_config;
}
}
void Mp2tStreamParser::OnAudioConfigChanged(
int pes_pid,
const AudioDecoderConfig& audio_decoder_config) {
DVLOG(1) << "OnAudioConfigChanged for pid=" << pes_pid;
DCHECK_EQ(pes_pid, selected_audio_pid_);
DCHECK(audio_decoder_config.IsValidConfig());
// Create a new entry in |buffer_queue_chain_| with the updated configs.
BufferQueueWithConfig buffer_queue_with_config(
false,
audio_decoder_config,
buffer_queue_chain_.empty()
? VideoDecoderConfig() : buffer_queue_chain_.back().video_config);
buffer_queue_chain_.push_back(buffer_queue_with_config);
// Replace any non valid config with the 1st valid entry.
// This might happen if there was no available config before.
for (std::list<BufferQueueWithConfig>::iterator it =
buffer_queue_chain_.begin(); it != buffer_queue_chain_.end(); ++it) {
if (it->audio_config.IsValidConfig())
break;
it->audio_config = audio_decoder_config;
}
}
bool Mp2tStreamParser::FinishInitializationIfNeeded() {
bool MediaParser::FinishInitializationIfNeeded() {
// Nothing to be done if already initialized.
if (is_initialized_)
return true;
// Wait for more data to come to finish initialization.
if (buffer_queue_chain_.empty())
if (pids_.empty())
return true;
// Wait for more data to come if one of the config is not available.
BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front();
if (selected_audio_pid_ > 0 &&
!queue_with_config.audio_config.IsValidConfig())
return true;
if (selected_video_pid_ > 0 &&
!queue_with_config.video_config.IsValidConfig())
return true;
// Pass the config before invoking the initialization callback.
RCHECK(config_cb_.Run(queue_with_config.audio_config,
queue_with_config.video_config,
TextTrackConfigMap()));
queue_with_config.is_config_sent = true;
// For Mpeg2 TS, the duration is not known.
std::vector<scoped_refptr<StreamInfo> > all_stream_info;
uint32 num_es(0);
for (PidMap::const_iterator iter = pids_.begin(); iter != pids_.end();
++iter) {
if (((iter->second->pid_type() == PidState::kPidAudioPes) ||
(iter->second->pid_type() == PidState::kPidVideoPes))) {
++num_es;
if (iter->second->config())
all_stream_info.push_back(iter->second->config());
}
}
if (num_es && (all_stream_info.size() == num_es)) {
// All stream configurations have been received. Initialization can
// be completed.
init_cb_.Run(all_stream_info);
DVLOG(1) << "Mpeg2TS stream parser initialization done";
init_cb_.Run(true, kInfiniteDuration(), false);
is_initialized_ = true;
}
return true;
}
void Mp2tStreamParser::OnEmitAudioBuffer(
int pes_pid,
scoped_refptr<StreamParserBuffer> stream_parser_buffer) {
DCHECK_EQ(pes_pid, selected_audio_pid_);
void MediaParser::OnEmitSample(uint32 pes_pid,
scoped_refptr<MediaSample>& new_sample) {
DCHECK(new_sample);
DVLOG(LOG_LEVEL_ES)
<< "OnEmitAudioBuffer: "
<< "OnEmitSample: "
<< " pid="
<< pes_pid
<< " size="
<< stream_parser_buffer->data_size()
<< new_sample->data_size()
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
<< new_sample->dts()
<< " pts="
<< stream_parser_buffer->timestamp().InMilliseconds();
stream_parser_buffer->set_timestamp(
stream_parser_buffer->timestamp() - time_offset_);
stream_parser_buffer->SetDecodeTimestamp(
stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
<< new_sample->pts();
// Ignore the incoming buffer if it is not associated with any config.
if (buffer_queue_chain_.empty()) {
DVLOG(1) << "Ignoring audio buffer with no corresponding audio config";
// Add the sample to the appropriate PID sample queue.
PidMap::iterator pid_state = pids_.find(pes_pid);
if (pid_state == pids_.end()) {
LOG(ERROR) << "PID State for new sample not found (pid = "
<< pes_pid << ").";
return;
}
buffer_queue_chain_.back().audio_queue.push_back(stream_parser_buffer);
pid_state->second->sample_queue().push_back(new_sample);
}
void Mp2tStreamParser::OnEmitVideoBuffer(
int pes_pid,
scoped_refptr<StreamParserBuffer> stream_parser_buffer) {
DCHECK_EQ(pes_pid, selected_video_pid_);
DVLOG(LOG_LEVEL_ES)
<< "OnEmitVideoBuffer"
<< " size="
<< stream_parser_buffer->data_size()
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds()
<< " pts="
<< stream_parser_buffer->timestamp().InMilliseconds()
<< " IsKeyframe="
<< stream_parser_buffer->IsKeyframe();
stream_parser_buffer->set_timestamp(
stream_parser_buffer->timestamp() - time_offset_);
stream_parser_buffer->SetDecodeTimestamp(
stream_parser_buffer->GetDecodeTimestamp() - time_offset_);
// Ignore the incoming buffer if it is not associated with any config.
if (buffer_queue_chain_.empty()) {
DVLOG(1) << "Ignoring video buffer with no corresponding video config:"
<< " keyframe=" << stream_parser_buffer->IsKeyframe()
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
return;
}
// A segment cannot start with a non key frame.
// Ignore the frame if that's the case.
if (first_video_frame_in_segment_ && !stream_parser_buffer->IsKeyframe()) {
DVLOG(1) << "Ignoring non-key frame:"
<< " dts="
<< stream_parser_buffer->GetDecodeTimestamp().InMilliseconds();
return;
}
first_video_frame_in_segment_ = false;
buffer_queue_chain_.back().video_queue.push_back(stream_parser_buffer);
}
bool Mp2tStreamParser::EmitRemainingBuffers() {
DVLOG(LOG_LEVEL_ES) << "Mp2tStreamParser::EmitRemainingBuffers";
bool MediaParser::EmitRemainingSamples() {
DVLOG(LOG_LEVEL_ES) << "mp2t::MediaParser::EmitRemainingBuffers";
// No buffer should be sent until fully initialized.
if (!is_initialized_)
return true;
if (buffer_queue_chain_.empty())
return true;
// Keep track of the last audio and video config sent.
AudioDecoderConfig last_audio_config =
buffer_queue_chain_.back().audio_config;
VideoDecoderConfig last_video_config =
buffer_queue_chain_.back().video_config;
// Buffer emission.
while (!buffer_queue_chain_.empty()) {
// Start a segment if needed.
if (!segment_started_) {
DVLOG(1) << "Starting a new segment";
segment_started_ = true;
new_segment_cb_.Run();
}
// Update the audio and video config if needed.
BufferQueueWithConfig& queue_with_config = buffer_queue_chain_.front();
if (!queue_with_config.is_config_sent) {
if (!config_cb_.Run(queue_with_config.audio_config,
queue_with_config.video_config,
TextTrackConfigMap()))
return false;
queue_with_config.is_config_sent = true;
}
// Add buffers.
TextBufferQueueMap empty_text_map;
if (!queue_with_config.audio_queue.empty() ||
!queue_with_config.video_queue.empty()) {
if (!new_buffers_cb_.Run(queue_with_config.audio_queue,
queue_with_config.video_queue,
empty_text_map)) {
for (PidMap::const_iterator pid_iter = pids_.begin(); pid_iter != pids_.end();
++pid_iter) {
SampleQueue& sample_queue = pid_iter->second->sample_queue();
for (SampleQueue::iterator sample_iter = sample_queue.begin();
sample_iter != sample_queue.end();
++sample_iter) {
if (!new_sample_cb_.Run(pid_iter->first, *sample_iter)) {
// Error processing sample. Propagate error condition.
return false;
}
}
buffer_queue_chain_.pop_front();
sample_queue.clear();
}
// Push an empty queue with the last audio/video config
// so that buffers with the same config can be added later on.
BufferQueueWithConfig queue_with_config(
true, last_audio_config, last_video_config);
buffer_queue_chain_.push_back(queue_with_config);
return true;
}
} // namespace mp2t
} // namespace media


@ -2,135 +2,95 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef MEDIA_FORMATS_MP2T_MP2T_STREAM_PARSER_H_
#define MEDIA_FORMATS_MP2T_MP2T_STREAM_PARSER_H_
#ifndef MEDIA_FORMATS_MP2T_MP2T_MEDIA_PARSER_H_
#define MEDIA_FORMATS_MP2T_MP2T_MEDIA_PARSER_H_
#include <list>
#include <deque>
#include <map>
#include "base/memory/ref_counted.h"
#include "base/memory/scoped_ptr.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/byte_queue.h"
#include "media/base/media_export.h"
#include "media/base/stream_parser.h"
#include "media/base/video_decoder_config.h"
#include "media/base/media_parser.h"
#include "media/base/stream_info.h"
namespace media {
class StreamParserBuffer;
class MediaSample;
namespace mp2t {
class PidState;
class TsPacket;
class TsSection;
class MEDIA_EXPORT Mp2tStreamParser : public StreamParser {
typedef std::deque<scoped_refptr<MediaSample> > SampleQueue;
class MediaParser : public media::MediaParser {
public:
explicit Mp2tStreamParser(bool sbr_in_mimetype);
virtual ~Mp2tStreamParser();
explicit MediaParser();
virtual ~MediaParser();
// StreamParser implementation.
// media::MediaParser implementation.
virtual void Init(const InitCB& init_cb,
const NewConfigCB& config_cb,
const NewBuffersCB& new_buffers_cb,
bool ignore_text_tracks,
const NeedKeyCB& need_key_cb,
const NewMediaSegmentCB& new_segment_cb,
const base::Closure& end_of_segment_cb,
const LogCB& log_cb) OVERRIDE;
const NewSampleCB& new_sample_cb,
const NeedKeyCB& need_key_cb) OVERRIDE;
virtual void Flush() OVERRIDE;
virtual bool Parse(const uint8* buf, int size) OVERRIDE;
private:
typedef std::map<int, PidState*> PidMap;
struct BufferQueueWithConfig {
BufferQueueWithConfig(bool is_cfg_sent,
const AudioDecoderConfig& audio_cfg,
const VideoDecoderConfig& video_cfg);
~BufferQueueWithConfig();
bool is_config_sent;
AudioDecoderConfig audio_config;
StreamParser::BufferQueue audio_queue;
VideoDecoderConfig video_config;
StreamParser::BufferQueue video_queue;
};
// Callback invoked to register a Program Map Table.
// Note: Does nothing if the PID is already registered.
void RegisterPmt(int program_number, int pmt_pid);
// Callback invoked to register a PES pid.
// Possible values for |stream_type| are defined in:
// ISO-13818.1 / ITU H.222 Table 2.34 "Stream type assignments".
// Possible values for |media_type| are defined in:
// ISO-13818.1 / ITU H.222 Table 2.34 "Media type assignments".
// |pes_pid| is part of the Program Map Table referred to by |pmt_pid|.
void RegisterPes(int pmt_pid, int pes_pid, int stream_type);
// Since the StreamParser interface allows only one audio & video streams,
// an automatic PID filtering should be applied to select the audio & video
// streams.
void UpdatePidFilter();
void RegisterPes(int pmt_pid, int pes_pid, int media_type);
// Callback invoked each time the audio/video decoder configuration is
// changed.
void OnVideoConfigChanged(int pes_pid,
const VideoDecoderConfig& video_decoder_config);
void OnAudioConfigChanged(int pes_pid,
const AudioDecoderConfig& audio_decoder_config);
void OnNewStreamInfo(scoped_refptr<StreamInfo>& new_stream_info);
// Callback invoked by the ES media parser
// to emit a new audio/video access unit.
void OnEmitSample(uint32 pes_pid, scoped_refptr<MediaSample>& new_sample);
// Invoke the initialization callback if needed.
bool FinishInitializationIfNeeded();
// Callback invoked by the ES stream parser
// to emit a new audio/video access unit.
void OnEmitAudioBuffer(
int pes_pid,
scoped_refptr<StreamParserBuffer> stream_parser_buffer);
void OnEmitVideoBuffer(
int pes_pid,
scoped_refptr<StreamParserBuffer> stream_parser_buffer);
bool EmitRemainingBuffers();
bool EmitRemainingSamples();
/// Set the value of the "SBR in mime-type" flag which leads to sample rate
/// doubling. Default value is false.
void set_sbr_in_mime_type(bool sbr_in_mimetype) {
sbr_in_mimetype_ = sbr_in_mimetype; }
// List of callbacks.
InitCB init_cb_;
NewConfigCB config_cb_;
NewBuffersCB new_buffers_cb_;
NewSampleCB new_sample_cb_;
NeedKeyCB need_key_cb_;
NewMediaSegmentCB new_segment_cb_;
base::Closure end_of_segment_cb_;
LogCB log_cb_;
// True when AAC SBR extension is signalled in the mimetype
// (mp4a.40.5 in the codecs parameter).
bool sbr_in_mimetype_;
// Bytes of the TS stream.
// Bytes of the TS media.
ByteQueue ts_byte_queue_;
// List of PIDs and their state.
// List of PIDs and their states.
PidMap pids_;
// Selected audio and video PIDs.
int selected_audio_pid_;
int selected_video_pid_;
// Pending audio & video buffers.
std::list<BufferQueueWithConfig> buffer_queue_chain_;
// Whether |init_cb_| has been invoked.
bool is_initialized_;
// Indicate whether a segment was started.
bool segment_started_;
bool first_video_frame_in_segment_;
base::TimeDelta time_offset_;
DISALLOW_COPY_AND_ASSIGN(Mp2tStreamParser);
DISALLOW_COPY_AND_ASSIGN(MediaParser);
};
} // namespace mp2t
} // namespace media
#endif


@ -9,36 +9,37 @@
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "media/base/audio_decoder_config.h"
#include "media/base/decoder_buffer.h"
#include "media/base/stream_parser_buffer.h"
#include "media/base/test_data_util.h"
#include "media/base/text_track_config.h"
#include "media/base/video_decoder_config.h"
#include "media/formats/mp2t/mp2t_stream_parser.h"
#include "media/base/media_sample.h"
#include "media/base/stream_info.h"
#include "media/base/timestamp.h"
#include "media/base/video_stream_info.h"
#include "media/formats/mp2t/mp2t_common.h"
#include "media/formats/mp2t/mp2t_media_parser.h"
#include "media/test/test_data_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace media {
namespace mp2t {
class Mp2tStreamParserTest : public testing::Test {
class Mp2tMediaParserTest : public testing::Test {
public:
Mp2tStreamParserTest()
Mp2tMediaParserTest()
: audio_frame_count_(0),
video_frame_count_(0),
video_min_dts_(kNoTimestamp()),
video_max_dts_(kNoTimestamp()) {
bool has_sbr = false;
parser_.reset(new Mp2tStreamParser(has_sbr));
video_min_dts_(kNoTimestamp),
video_max_dts_(kNoTimestamp) {
parser_.reset(new MediaParser());
}
protected:
scoped_ptr<Mp2tStreamParser> parser_;
typedef std::map<int, scoped_refptr<StreamInfo> > StreamMap;
scoped_ptr<MediaParser> parser_;
StreamMap stream_map_;
int audio_frame_count_;
int video_frame_count_;
base::TimeDelta video_min_dts_;
base::TimeDelta video_max_dts_;
int64 video_min_dts_;
int64 video_max_dts_;
bool AppendData(const uint8* data, size_t length) {
return parser_->Parse(data, length);
@ -57,56 +58,37 @@ class Mp2tStreamParserTest : public testing::Test {
return true;
}
void OnInit(bool init_ok,
base::TimeDelta duration,
bool auto_update_timestamp_offset) {
DVLOG(1) << "OnInit: ok=" << init_ok
<< ", dur=" << duration.InMilliseconds()
<< ", autoTimestampOffset=" << auto_update_timestamp_offset;
}
bool OnNewConfig(const AudioDecoderConfig& ac,
const VideoDecoderConfig& vc,
const StreamParser::TextTrackConfigMap& tc) {
DVLOG(1) << "OnNewConfig: audio=" << ac.IsValidConfig()
<< ", video=" << vc.IsValidConfig();
return true;
}
void DumpBuffers(const std::string& label,
const StreamParser::BufferQueue& buffers) {
DVLOG(2) << "DumpBuffers: " << label << " size " << buffers.size();
for (StreamParser::BufferQueue::const_iterator buf = buffers.begin();
buf != buffers.end(); buf++) {
DVLOG(3) << " n=" << buf - buffers.begin()
<< ", size=" << (*buf)->data_size()
<< ", dur=" << (*buf)->duration().InMilliseconds();
void OnInit(const std::vector<scoped_refptr<StreamInfo> >& stream_infos) {
DVLOG(1) << "OnInit: " << stream_infos.size() << " streams.";
for (std::vector<scoped_refptr<StreamInfo> >::const_iterator iter =
stream_infos.begin(); iter != stream_infos.end(); ++iter) {
DVLOG(1) << (*iter)->ToString();
stream_map_[(*iter)->track_id()] = *iter;
}
}
bool OnNewBuffers(const StreamParser::BufferQueue& audio_buffers,
const StreamParser::BufferQueue& video_buffers,
const StreamParser::TextBufferQueueMap& text_map) {
DumpBuffers("audio_buffers", audio_buffers);
DumpBuffers("video_buffers", video_buffers);
audio_frame_count_ += audio_buffers.size();
video_frame_count_ += video_buffers.size();
// TODO(wolenetz/acolwell): Add text track support to more MSE parsers. See
// http://crbug.com/336926.
if (!text_map.empty())
bool OnNewSample(uint32 track_id, const scoped_refptr<MediaSample>& sample) {
std::string stream_type;
StreamMap::const_iterator stream = stream_map_.find(track_id);
if (stream != stream_map_.end()) {
if (stream->second->stream_type() == kStreamAudio) {
++audio_frame_count_;
stream_type = "audio";
} else if (stream->second->stream_type() == kStreamVideo) {
++video_frame_count_;
stream_type = "video";
if (video_min_dts_ == kNoTimestamp)
video_min_dts_ = sample->dts();
// Verify timestamps are increasing.
if (video_max_dts_ == kNoTimestamp)
video_max_dts_ = sample->dts();
else if (video_max_dts_ >= sample->dts()) {
LOG(ERROR) << "Video DTS not strictly increasing.";
return false;
if (video_min_dts_ == kNoTimestamp() && !video_buffers.empty())
video_min_dts_ = video_buffers.front()->GetDecodeTimestamp();
if (!video_buffers.empty()) {
video_max_dts_ = video_buffers.back()->GetDecodeTimestamp();
// Verify monotonicity.
StreamParser::BufferQueue::const_iterator it1 = video_buffers.begin();
StreamParser::BufferQueue::const_iterator it2 = ++it1;
for ( ; it2 != video_buffers.end(); ++it1, ++it2) {
if ((*it2)->GetDecodeTimestamp() < (*it1)->GetDecodeTimestamp())
}
video_max_dts_ = sample->dts();
} else {
LOG(ERROR) << "Missing StreamInfo for track ID " << track_id;
return false;
}
}
@ -114,49 +96,34 @@ class Mp2tStreamParserTest : public testing::Test {
return true;
}
void OnKeyNeeded(const std::string& type,
const std::vector<uint8>& init_data) {
DVLOG(1) << "OnKeyNeeded: " << init_data.size();
}
void OnNewSegment() {
DVLOG(1) << "OnNewSegment";
}
void OnEndOfSegment() {
DVLOG(1) << "OnEndOfSegment()";
void OnKeyNeeded(MediaContainerName container_name,
scoped_ptr<uint8[]> init_data,
int init_data_size) {
DVLOG(1) << "OnKeyNeeded: " << init_data_size;
}
void InitializeParser() {
parser_->Init(
base::Bind(&Mp2tStreamParserTest::OnInit,
base::Bind(&Mp2tMediaParserTest::OnInit,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewConfig,
base::Bind(&Mp2tMediaParserTest::OnNewSample,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewBuffers,
base::Unretained(this)),
true,
base::Bind(&Mp2tStreamParserTest::OnKeyNeeded,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnNewSegment,
base::Unretained(this)),
base::Bind(&Mp2tStreamParserTest::OnEndOfSegment,
base::Unretained(this)),
LogCB());
base::Bind(&Mp2tMediaParserTest::OnKeyNeeded,
base::Unretained(this)));
}
bool ParseMpeg2TsFile(const std::string& filename, int append_bytes) {
InitializeParser();
scoped_refptr<DecoderBuffer> buffer = ReadTestDataFile(filename);
EXPECT_TRUE(AppendDataInPieces(buffer->data(),
buffer->data_size(),
std::vector<uint8> buffer = ReadTestDataFile(filename);
EXPECT_TRUE(AppendDataInPieces(buffer.data(),
buffer.size(),
append_bytes));
return true;
}
};
TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
TEST_F(Mp2tMediaParserTest, UnalignedAppend17) {
// Test small, non-segment-aligned appends.
ParseMpeg2TsFile("bear-1280x720.ts", 17);
EXPECT_EQ(video_frame_count_, 81);
@ -164,7 +131,7 @@ TEST_F(Mp2tStreamParserTest, UnalignedAppend17) {
EXPECT_EQ(video_frame_count_, 82);
}
TEST_F(Mp2tStreamParserTest, UnalignedAppend512) {
TEST_F(Mp2tMediaParserTest, UnalignedAppend512) {
// Test small, non-segment-aligned appends.
ParseMpeg2TsFile("bear-1280x720.ts", 512);
EXPECT_EQ(video_frame_count_, 81);
@ -172,15 +139,16 @@ TEST_F(Mp2tStreamParserTest, UnalignedAppend512) {
EXPECT_EQ(video_frame_count_, 82);
}
TEST_F(Mp2tStreamParserTest, TimestampWrapAround) {
TEST_F(Mp2tMediaParserTest, TimestampWrapAround) {
// "bear-1280x720_ptswraparound.ts" has been transcoded
// from bear-1280x720.mp4 by applying a time offset of 95442s
// (close to 2^33 / 90000) which results in timestamps wrap around
// in the Mpeg2 TS stream.
ParseMpeg2TsFile("bear-1280x720_ptswraparound.ts", 512);
EXPECT_EQ(video_frame_count_, 81);
EXPECT_GE(video_min_dts_, base::TimeDelta::FromSeconds(95443 - 10));
EXPECT_LE(video_max_dts_, base::TimeDelta::FromSeconds(95443 + 10));
EXPECT_GE(video_min_dts_, (95443 - 1) * kMpeg2Timescale);
EXPECT_LE(video_max_dts_,
static_cast<int64>((95443 + 4)) * kMpeg2Timescale);
}
} // namespace mp2t


@ -7,7 +7,7 @@
#include "base/logging.h"
#include "base/strings/string_number_conversions.h"
#include "media/base/bit_reader.h"
#include "media/base/buffers.h"
#include "media/base/timestamp.h"
#include "media/formats/mp2t/es_parser.h"
#include "media/formats/mp2t/mp2t_common.h"
@ -266,15 +266,15 @@ bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
}
// Convert and unroll the timestamps.
base::TimeDelta media_pts(kNoTimestamp());
base::TimeDelta media_dts(kNoTimestamp());
int64 media_pts(kNoTimestamp);
int64 media_dts(kNoTimestamp);
if (is_pts_valid) {
int64 pts = ConvertTimestampSectionToTimestamp(pts_section);
if (previous_pts_valid_)
pts = UnrollTimestamp(previous_pts_, pts);
previous_pts_ = pts;
previous_pts_valid_ = true;
media_pts = base::TimeDelta::FromMicroseconds((1000 * pts) / 90);
media_pts = pts;
}
if (is_dts_valid) {
int64 dts = ConvertTimestampSectionToTimestamp(dts_section);
@ -282,7 +282,7 @@ bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
dts = UnrollTimestamp(previous_dts_, dts);
previous_dts_ = dts;
previous_dts_valid_ = true;
media_dts = base::TimeDelta::FromMicroseconds((1000 * dts) / 90);
media_dts = dts;
}
// Discard the rest of the PES packet header.
@ -296,8 +296,8 @@ bool TsSectionPes::ParseInternal(const uint8* raw_pes, int raw_pes_size) {
DVLOG(LOG_LEVEL_PES)
<< "Emit a reassembled PES:"
<< " size=" << es_size
<< " pts=" << media_pts.InMilliseconds()
<< " dts=" << media_dts.InMilliseconds()
<< " pts=" << media_pts
<< " dts=" << media_dts
<< " data_alignment_indicator=" << data_alignment_indicator;
return es_parser_->Parse(&raw_pes[es_offset], es_size, media_pts, media_dts);
}
@ -309,4 +309,3 @@ void TsSectionPes::ResetPesState() {
} // namespace mp2t
} // namespace media


@ -37,4 +37,3 @@ class TsSectionPmt : public TsSectionPsi {
} // namespace media
#endif


@ -129,4 +129,3 @@ void TsSectionPsi::ResetPsiState() {
} // namespace mp2t
} // namespace media


@ -10,15 +10,15 @@ namespace media {
// The following conversion table is extracted from ISO 14496 Part 3 -
// Table 1.16 - Sampling Frequency Index.
const int kADTSFrequencyTable[] = {96000, 88200, 64000, 48000, 44100,
const int kAdtsFrequencyTable[] = {96000, 88200, 64000, 48000, 44100,
32000, 24000, 22050, 16000, 12000,
11025, 8000, 7350};
const size_t kADTSFrequencyTableSize = arraysize(kADTSFrequencyTable);
const size_t kAdtsFrequencyTableSize = arraysize(kAdtsFrequencyTable);
// The following conversion table is extracted from ISO 14496 Part 3 -
// Table 1.17 - Channel Configuration.
const int kADTSNumChannelsTable[] = {
const int kAdtsNumChannelsTable[] = {
0, 1, 2, 2, 4, 5, 6, 8 };
const size_t kADTSNumChannelsTableSize = arraysize(kADTSNumChannelsTable);
const size_t kAdtsNumChannelsTableSize = arraysize(kAdtsNumChannelsTable);
} // namespace media


@ -24,7 +24,11 @@ vorbis-packet-1 - timestamp: 0ms, duration: 0ms
vorbis-packet-2 - timestamp: 0ms, duration: 0ms
vorbis-packet-3 - timestamp: 2902ms, duration: 0ms
// Encrypted Files
// Transport streams.
bear-1280x720.ts - AVC + AAC encode, multiplexed into an MPEG2-TS container.
bear-1280x720_ptswraparound.ts - Same as bear-1280x720.ts, with a timestamp wrap-around in the middle.
// Encrypted Files.
bear-1280x720-a_frag-cenc.mp4 - A fragmented MP4 version of the audio track of bear-1280x720.mp4 encrypted (ISO CENC) using key ID [1] and key [2].
bear-1280x720-a_frag-cenc_clear-all.mp4 - Same as bear-1280x720-a_frag-cenc.mp4 but no fragments are encrypted.
bear-1280x720-v_frag-cenc.mp4 - A fragmented MP4 version of the video track of bear-1280x720.mp4 encrypted (ISO CENC) using key ID [1] and key [2].

Binary file not shown.

Binary file not shown.