Modified audio elementary stream parsing to work with packager remux.

Change-Id: Ice95102640e5cbb0382dc4c604c0af013103da99
Thomas Inskip 2014-04-07 10:48:25 -07:00
parent 30b51506c4
commit a7c91ca7dd
14 changed files with 248 additions and 137 deletions

.gitignore vendored

@@ -1,4 +1,5 @@
 *.pyc
+*~
 .cproject
 .project
 .pydevproject

media/base/audio_timestamp_helper.cc

@@ -5,71 +5,72 @@
 #include "media/base/audio_timestamp_helper.h"
 #include "base/logging.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp.h"
 namespace media {
-AudioTimestampHelper::AudioTimestampHelper(int samples_per_second)
-    : base_timestamp_(kNoTimestamp()),
+AudioTimestampHelper::AudioTimestampHelper(uint32 timescale,
+                                           uint32 samples_per_second)
+    : base_timestamp_(kNoTimestamp),
       frame_count_(0) {
-  DCHECK_GT(samples_per_second, 0);
+  DCHECK_GT(samples_per_second, 0u);
   double fps = samples_per_second;
-  microseconds_per_frame_ = base::Time::kMicrosecondsPerSecond / fps;
+  ticks_per_frame_ = timescale / fps;
 }
-void AudioTimestampHelper::SetBaseTimestamp(base::TimeDelta base_timestamp) {
+void AudioTimestampHelper::SetBaseTimestamp(int64 base_timestamp) {
   base_timestamp_ = base_timestamp;
   frame_count_ = 0;
 }
-base::TimeDelta AudioTimestampHelper::base_timestamp() const {
+int64 AudioTimestampHelper::base_timestamp() const {
   return base_timestamp_;
 }
-void AudioTimestampHelper::AddFrames(int frame_count) {
+void AudioTimestampHelper::AddFrames(int64 frame_count) {
   DCHECK_GE(frame_count, 0);
-  DCHECK(base_timestamp_ != kNoTimestamp());
+  DCHECK(base_timestamp_ != kNoTimestamp);
   frame_count_ += frame_count;
 }
-base::TimeDelta AudioTimestampHelper::GetTimestamp() const {
+int64 AudioTimestampHelper::GetTimestamp() const {
   return ComputeTimestamp(frame_count_);
 }
-base::TimeDelta AudioTimestampHelper::GetFrameDuration(int frame_count) const {
+int64 AudioTimestampHelper::GetFrameDuration(int64 frame_count) const {
   DCHECK_GE(frame_count, 0);
-  base::TimeDelta end_timestamp = ComputeTimestamp(frame_count_ + frame_count);
+  int64 end_timestamp = ComputeTimestamp(frame_count_ + frame_count);
   return end_timestamp - GetTimestamp();
 }
-int64 AudioTimestampHelper::GetFramesToTarget(base::TimeDelta target) const {
-  DCHECK(base_timestamp_ != kNoTimestamp());
+int64 AudioTimestampHelper::GetFramesToTarget(int64 target) const {
+  DCHECK(base_timestamp_ != kNoTimestamp);
   DCHECK(target >= base_timestamp_);
-  int64 delta_in_us = (target - GetTimestamp()).InMicroseconds();
-  if (delta_in_us == 0)
+  int64 delta_in_ticks = (target - GetTimestamp());
+  if (delta_in_ticks == 0)
     return 0;
   // Compute a timestamp relative to |base_timestamp_| since timestamps
   // created from |frame_count_| are computed relative to this base.
   // This ensures that the time to frame computation here is the proper inverse
   // of the frame to time computation in ComputeTimestamp().
-  base::TimeDelta delta_from_base = target - base_timestamp_;
+  int64 delta_from_base = target - base_timestamp_;
   // Compute frame count for the time delta. This computation rounds to
   // the nearest whole number of frames.
-  double threshold = microseconds_per_frame_ / 2;
+  double threshold = ticks_per_frame_ / 2;
   int64 target_frame_count =
-      (delta_from_base.InMicroseconds() + threshold) / microseconds_per_frame_;
+      (delta_from_base + threshold) / ticks_per_frame_;
   return target_frame_count - frame_count_;
 }
-base::TimeDelta AudioTimestampHelper::ComputeTimestamp(
+int64 AudioTimestampHelper::ComputeTimestamp(
     int64 frame_count) const {
   DCHECK_GE(frame_count, 0);
-  DCHECK(base_timestamp_ != kNoTimestamp());
-  double frames_us = microseconds_per_frame_ * frame_count;
-  return base_timestamp_ + base::TimeDelta::FromMicroseconds(frames_us);
+  DCHECK(base_timestamp_ != kNoTimestamp);
+  double frames_ticks = ticks_per_frame_ * frame_count;
+  return base_timestamp_ + frames_ticks;
 }
 }  // namespace media
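A minimal usage sketch of the reworked helper (not part of the commit): it assumes the packager build environment and uses only the constructor and methods shown in this file, with the 90 kHz MPEG-2 timescale that the ADTS parser below passes in.

// Sketch: timestamps are now plain int64 ticks in the caller's timescale.
#include "base/logging.h"
#include "media/base/audio_timestamp_helper.h"

void DemoTimestampHelper() {
  // 90 kHz MPEG-2 clock, 44.1 kHz AAC audio.
  media::AudioTimestampHelper helper(90000u, 44100u);
  helper.SetBaseTimestamp(0);
  helper.AddFrames(1024);                   // one AAC frame
  int64 pts = helper.GetTimestamp();        // ~2089 ticks (1024 * 90000 / 44100, truncated)
  int64 duration = helper.GetFrameDuration(1024);
  LOG(INFO) << "pts=" << pts << " next duration=" << duration << " ticks";
}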

media/base/audio_timestamp_helper.h

@@ -5,8 +5,7 @@
 #ifndef MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
 #define MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
-#include "base/time/time.h"
-#include "media/base/media_export.h"
+#include "base/basictypes.h"
 namespace media {
@@ -25,41 +24,41 @@ namespace media {
 // values for samples added to the current timestamp. GetFramesToTarget()
 // determines the number of frames that need to be added/removed from the
 // accumulated frames to reach a target timestamp.
-class MEDIA_EXPORT AudioTimestampHelper {
+class AudioTimestampHelper {
  public:
-  explicit AudioTimestampHelper(int samples_per_second);
+  explicit AudioTimestampHelper(uint32 timescale, uint32 samples_per_second);
   // Sets the base timestamp to |base_timestamp| and the sets count to 0.
-  void SetBaseTimestamp(base::TimeDelta base_timestamp);
+  void SetBaseTimestamp(int64 base_timestamp);
-  base::TimeDelta base_timestamp() const;
+  int64 base_timestamp() const;
   int64 frame_count() const { return frame_count_; }
   // Adds |frame_count| to the frame counter.
   // Note: SetBaseTimestamp() must be called with a value other than
   // kNoTimestamp() before this method can be called.
-  void AddFrames(int frame_count);
+  void AddFrames(int64 frame_count);
   // Get the current timestamp. This value is computed from the base_timestamp()
   // and the number of sample frames that have been added so far.
-  base::TimeDelta GetTimestamp() const;
+  int64 GetTimestamp() const;
   // Gets the duration if |frame_count| frames were added to the current
   // timestamp reported by GetTimestamp(). This method ensures that
   // (GetTimestamp() + GetFrameDuration(n)) will equal the timestamp that
   // GetTimestamp() will return if AddFrames(n) is called.
-  base::TimeDelta GetFrameDuration(int frame_count) const;
+  int64 GetFrameDuration(int64 frame_count) const;
   // Returns the number of frames needed to reach the target timestamp.
   // Note: |target| must be >= |base_timestamp_|.
-  int64 GetFramesToTarget(base::TimeDelta target) const;
+  int64 GetFramesToTarget(int64 target) const;
  private:
-  base::TimeDelta ComputeTimestamp(int64 frame_count) const;
+  int64 ComputeTimestamp(int64 frame_count) const;
-  double microseconds_per_frame_;
+  double ticks_per_frame_;
-  base::TimeDelta base_timestamp_;
+  int64 base_timestamp_;
   // Number of frames accumulated by AddFrames() calls.
   int64 frame_count_;

media/base/audio_timestamp_helper_unittest.cc

@@ -3,29 +3,29 @@
 // found in the LICENSE file.
 #include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp.h"
 #include "testing/gtest/include/gtest/gtest.h"
 namespace media {
-static const int kDefaultSampleRate = 44100;
+static const uint32 kDefaultSampleRate = 44100;
+static const uint32 kTimescale = 1000000;
 class AudioTimestampHelperTest : public ::testing::Test {
  public:
-  AudioTimestampHelperTest() : helper_(kDefaultSampleRate) {
-    helper_.SetBaseTimestamp(base::TimeDelta());
+  AudioTimestampHelperTest() : helper_(kTimescale, kDefaultSampleRate) {
+    helper_.SetBaseTimestamp(0);
   }
   // Adds frames to the helper and returns the current timestamp in
   // microseconds.
   int64 AddFrames(int frames) {
     helper_.AddFrames(frames);
-    return helper_.GetTimestamp().InMicroseconds();
+    return helper_.GetTimestamp();
   }
   int64 FramesToTarget(int target_in_microseconds) {
-    return helper_.GetFramesToTarget(
-        base::TimeDelta::FromMicroseconds(target_in_microseconds));
+    return helper_.GetFramesToTarget(target_in_microseconds);
   }
   void TestGetFramesToTargetRange(int frame_count, int start, int end) {
@@ -42,7 +42,7 @@ class AudioTimestampHelperTest : public ::testing::Test {
 };
 TEST_F(AudioTimestampHelperTest, Basic) {
-  EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
+  EXPECT_EQ(0, helper_.GetTimestamp());
   // Verify that the output timestamp is always rounded down to the
   // nearest microsecond. 1 frame @ 44100 is ~22.67573 microseconds,
@@ -57,30 +57,30 @@ TEST_F(AudioTimestampHelperTest, Basic) {
   // Verify that adding frames one frame at a time matches the timestamp
   // returned if the same number of frames are added all at once.
-  base::TimeDelta timestamp_1 = helper_.GetTimestamp();
+  int64 timestamp_1 = helper_.GetTimestamp();
-  helper_.SetBaseTimestamp(kNoTimestamp());
-  EXPECT_TRUE(kNoTimestamp() == helper_.base_timestamp());
-  helper_.SetBaseTimestamp(base::TimeDelta());
-  EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
+  helper_.SetBaseTimestamp(kNoTimestamp);
+  EXPECT_TRUE(kNoTimestamp == helper_.base_timestamp());
+  helper_.SetBaseTimestamp(0);
+  EXPECT_EQ(0, helper_.GetTimestamp());
   helper_.AddFrames(5);
-  EXPECT_EQ(113, helper_.GetTimestamp().InMicroseconds());
+  EXPECT_EQ(113, helper_.GetTimestamp());
   EXPECT_TRUE(timestamp_1 == helper_.GetTimestamp());
 }
 TEST_F(AudioTimestampHelperTest, GetDuration) {
-  helper_.SetBaseTimestamp(base::TimeDelta::FromMicroseconds(100));
+  helper_.SetBaseTimestamp(100);
   int frame_count = 5;
   int64 expected_durations[] = { 113, 113, 114, 113, 113, 114 };
   for (size_t i = 0; i < arraysize(expected_durations); ++i) {
-    base::TimeDelta duration = helper_.GetFrameDuration(frame_count);
-    EXPECT_EQ(expected_durations[i], duration.InMicroseconds());
+    int64 duration = helper_.GetFrameDuration(frame_count);
+    EXPECT_EQ(expected_durations[i], duration);
-    base::TimeDelta timestamp_1 = helper_.GetTimestamp() + duration;
+    int64 timestamp_1 = helper_.GetTimestamp() + duration;
     helper_.AddFrames(frame_count);
-    base::TimeDelta timestamp_2 = helper_.GetTimestamp();
+    int64 timestamp_2 = helper_.GetTimestamp();
     EXPECT_TRUE(timestamp_1 == timestamp_2);
   }
 }
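The expected_durations values above follow directly from the new integer-tick arithmetic. A standalone sketch (not part of the commit) that reproduces them, assuming only the kTimescale and kDefaultSampleRate values used in this test:

// One frame at 44100 Hz in a 1,000,000-tick timescale is 1000000/44100
// ~= 22.6757 ticks, so each 5-frame step spans ~113.38 ticks; truncating the
// running total at every step yields the 113, 113, 114 pattern.
#include <cstdint>
#include <cstdio>

int main() {
  const double ticks_per_frame = 1000000.0 / 44100.0;
  int64_t frames = 0;
  for (int i = 0; i < 6; ++i) {
    int64_t start = static_cast<int64_t>(ticks_per_frame * frames);
    int64_t end = static_cast<int64_t>(ticks_per_frame * (frames + 5));
    std::printf("%lld\n", static_cast<long long>(end - start));  // 113 113 114 113 113 114
    frames += 5;
  }
  return 0;
}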

media/base/media_base.gyp

@@ -55,6 +55,8 @@
         'aes_encryptor.h',
         'audio_stream_info.cc',
         'audio_stream_info.h',
+        'audio_timestamp_helper.cc',
+        'audio_timestamp_helper.h',
         'bit_reader.cc',
         'bit_reader.h',
         'buffer_reader.cc',
@@ -94,6 +96,7 @@
         'stream_info.cc',
         'stream_info.h',
         'text_track.h',
+        'timestamp.h',
         'video_stream_info.cc',
         'video_stream_info.h',
         'widevine_encryptor_source.cc',
@@ -111,6 +114,7 @@
       'type': '<(gtest_target_type)',
       'sources': [
         'aes_encryptor_unittest.cc',
+        'audio_timestamp_helper_unittest.cc',
        'bit_reader_unittest.cc',
        'buffer_writer_unittest.cc',
        'closure_thread_unittest.cc',

media/base/timestamp.h Normal file

@@ -0,0 +1,20 @@
// Copyright 2014 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#ifndef MEDIA_BASE_TIMESTAMP_H_
#define MEDIA_BASE_TIMESTAMP_H_
#include "base/basictypes.h"
namespace media {
const int64 kNoTimestamp = kint64min;
const int64 kInfiniteDuration = kint64max;
} // namespace media
#endif // MEDIA_BASE_TIMESTAMP_H_

media/formats/mp2t/es_parser.h

@@ -8,32 +8,34 @@
 #include "base/basictypes.h"
 #include "base/callback.h"
 #include "base/memory/ref_counted.h"
-#include "base/time/time.h"
 namespace media {
-class StreamParserBuffer;
+class MediaSample;
 namespace mp2t {
 class EsParser {
  public:
-  typedef base::Callback<void(scoped_refptr<StreamParserBuffer>)> EmitBufferCB;
+  typedef base::Callback<void(scoped_refptr<MediaSample>)> EmitSampleCB;
-  EsParser() {}
+  EsParser(uint32 track_id) : track_id_(track_id) {}
   virtual ~EsParser() {}
   // ES parsing.
   // Should use kNoTimestamp when a timestamp is not valid.
-  virtual bool Parse(const uint8* buf, int size,
-                     base::TimeDelta pts,
-                     base::TimeDelta dts) = 0;
+  virtual bool Parse(const uint8* buf, int size, int64 pts, int64 dts) = 0;
   // Flush any pending buffer.
   virtual void Flush() = 0;
   // Reset the state of the ES parser.
   virtual void Reset() = 0;
+  uint32 track_id() { return track_id_; }
+ private:
+  uint32 track_id_;
 };
 }  // namespace mp2t
 }  // namespace media
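A short sketch of how the TS layer might drive this revised interface (not part of the commit). FeedPes() and EndOfStream() are hypothetical helpers; the Parse()/Flush()/Reset() signatures and the kNoTimestamp convention are the ones defined in this commit, with pts/dts expressed as int64 ticks on the 90 kHz MPEG-2 clock.

#include "media/base/timestamp.h"
#include "media/formats/mp2t/es_parser.h"

namespace media {
namespace mp2t {

// Forward one PES payload to the ES parser. When the PES header carried no
// PTS, pass kNoTimestamp so the parser interpolates from previous frames.
bool FeedPes(EsParser* parser,
             const uint8* pes_payload, int pes_size,
             bool pts_valid, int64 pts, int64 dts) {
  return parser->Parse(pes_payload, pes_size,
                       pts_valid ? pts : kNoTimestamp, dts);
}

// At end of stream, emit whatever is still buffered, then clear state.
void EndOfStream(EsParser* parser) {
  parser->Flush();
  parser->Reset();
}

}  // namespace mp2t
}  // namespace media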

media/formats/mp2t/es_parser_adts.cc

@@ -11,9 +11,8 @@
 #include "base/strings/string_number_conversions.h"
 #include "media/base/audio_timestamp_helper.h"
 #include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
-#include "media/base/channel_layout.h"
-#include "media/base/stream_parser_buffer.h"
+#include "media/base/media_sample.h"
+#include "media/base/timestamp.h"
 #include "media/formats/mp2t/mp2t_common.h"
 #include "media/formats/mpeg/adts_constants.h"
@@ -99,26 +98,26 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
 namespace mp2t {
 EsParserAdts::EsParserAdts(
+    uint32 track_id,
     const NewAudioConfigCB& new_audio_config_cb,
-    const EmitBufferCB& emit_buffer_cb,
+    const EmitSampleCB& emit_sample_cb,
     bool sbr_in_mimetype)
-    : new_audio_config_cb_(new_audio_config_cb),
-      emit_buffer_cb_(emit_buffer_cb),
+    : EsParser(track_id),
+      new_audio_config_cb_(new_audio_config_cb),
+      emit_sample_cb_(emit_sample_cb),
       sbr_in_mimetype_(sbr_in_mimetype) {
 }
 EsParserAdts::~EsParserAdts() {
 }
-bool EsParserAdts::Parse(const uint8* buf, int size,
-                         base::TimeDelta pts,
-                         base::TimeDelta dts) {
+bool EsParserAdts::Parse(const uint8* buf, int size, int64 pts, int64 dts) {
   int raw_es_size;
   const uint8* raw_es;
   // The incoming PTS applies to the access unit that comes just after
   // the beginning of |buf|.
-  if (pts != kNoTimestamp()) {
+  if (pts != kNoTimestamp) {
     es_byte_queue_.Peek(&raw_es, &raw_es_size);
     pts_list_.push_back(EsPts(raw_es_size, pts));
   }
@@ -156,25 +155,22 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
       pts_list_.pop_front();
     }
-    base::TimeDelta current_pts = audio_timestamp_helper_->GetTimestamp();
-    base::TimeDelta frame_duration =
+    int64 current_pts = audio_timestamp_helper_->GetTimestamp();
+    int64 frame_duration =
         audio_timestamp_helper_->GetFrameDuration(kSamplesPerAACFrame);
     // Emit an audio frame.
     bool is_key_frame = true;
-    // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
-    // type and allow multiple audio tracks. See https://crbug.com/341581.
-    scoped_refptr<StreamParserBuffer> stream_parser_buffer =
-        StreamParserBuffer::CopyFrom(
+    scoped_refptr<MediaSample> sample =
+        MediaSample::CopyFrom(
            &raw_es[es_position],
            frame_size,
-           is_key_frame,
-           DemuxerStream::AUDIO, 0);
-    stream_parser_buffer->SetDecodeTimestamp(current_pts);
-    stream_parser_buffer->set_timestamp(current_pts);
-    stream_parser_buffer->set_duration(frame_duration);
-    emit_buffer_cb_.Run(stream_parser_buffer);
+           is_key_frame);
+    sample->set_pts(current_pts);
+    sample->set_dts(current_pts);
+    sample->set_duration(frame_duration);
+    emit_sample_cb_.Run(sample);
     // Update the PTS of the next frame.
     audio_timestamp_helper_->AddFrames(kSamplesPerAACFrame);
@@ -195,10 +191,16 @@ void EsParserAdts::Flush() {
 void EsParserAdts::Reset() {
   es_byte_queue_.Reset();
   pts_list_.clear();
-  last_audio_decoder_config_ = AudioDecoderConfig();
+  last_audio_decoder_config_ = scoped_refptr<AudioStreamInfo>();
 }
 bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
+  if (last_audio_decoder_config_) {
+    // Varying audio configurations currently not supported. Just assume that
+    // the audio configuration has not changed.
+    return true;
+  }
   size_t frequency_index = ExtractAdtsFrequencyIndex(adts_header);
   if (frequency_index >= kADTSFrequencyTableSize) {
     // Frequency index 13 & 14 are reserved
@@ -227,33 +229,38 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
       ? std::min(2 * samples_per_second, 48000)
       : samples_per_second;
-  AudioDecoderConfig audio_decoder_config(
+  last_audio_decoder_config_ = scoped_refptr<AudioStreamInfo>
+      (new AudioStreamInfo(
+          track_id(),
+          kMpeg2Timescale,
+          kInfiniteDuration,
           kCodecAAC,
-          kSampleFormatS16,
+          std::string(),  // TODO(tinskip): calculate codec string.
+          std::string(),
+          16,
           kADTSChannelLayoutTable[channel_configuration],
-          extended_samples_per_second,
-          NULL, 0,
-          false);
+          samples_per_second,
+          NULL,  // TODO(tinskip): calculate AudioSpecificConfig.
+          0,
+          false));
-  if (!audio_decoder_config.Matches(last_audio_decoder_config_)) {
   DVLOG(1) << "Sampling frequency: " << samples_per_second;
   DVLOG(1) << "Extended sampling frequency: " << extended_samples_per_second;
   DVLOG(1) << "Channel config: " << channel_configuration;
   DVLOG(1) << "Adts profile: " << adts_profile;
-  // Reset the timestamp helper to use a new time scale.
+  // Reset the timestamp helper to use a new sampling frequency.
   if (audio_timestamp_helper_) {
-    base::TimeDelta base_timestamp = audio_timestamp_helper_->GetTimestamp();
+    int64 base_timestamp = audio_timestamp_helper_->GetTimestamp();
     audio_timestamp_helper_.reset(
-        new AudioTimestampHelper(samples_per_second));
+        new AudioTimestampHelper(kMpeg2Timescale, samples_per_second));
     audio_timestamp_helper_->SetBaseTimestamp(base_timestamp);
   } else {
     audio_timestamp_helper_.reset(
-        new AudioTimestampHelper(samples_per_second));
+        new AudioTimestampHelper(kMpeg2Timescale, samples_per_second));
   }
   // Audio config notification.
-  last_audio_decoder_config_ = audio_decoder_config;
-  new_audio_config_cb_.Run(audio_decoder_config);
-  }
+  new_audio_config_cb_.Run(last_audio_decoder_config_);
   return true;
 }
@@ -273,4 +280,3 @@ void EsParserAdts::DiscardEs(int nbytes) {
 }  // namespace mp2t
 }  // namespace media
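For reference, a hypothetical EmitSampleCB target (not in the commit) illustrating what the emitted samples carry. The pts()/dts()/duration() accessors on MediaSample are assumed to mirror the set_pts()/set_dts()/set_duration() calls above; kMpeg2Timescale is the 90 kHz constant added in this commit, assumed to be reachable through mp2t_common.h, which this file already includes.

#include "base/logging.h"
#include "media/base/media_sample.h"
#include "media/formats/mp2t/mp2t_common.h"

namespace media {
namespace mp2t {

// Logs each emitted AAC frame; a real handler would hand the sample to the
// remux pipeline instead.
void LogEmittedSample(scoped_refptr<MediaSample> sample) {
  const double pts_seconds =
      static_cast<double>(sample->pts()) / kMpeg2Timescale;
  DVLOG(1) << "AAC frame @ " << pts_seconds << " s, duration "
           << sample->duration() << " ticks (90 kHz)";
}

}  // namespace mp2t
}  // namespace media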

media/formats/mp2t/es_parser_adts.h

@@ -11,15 +11,13 @@
 #include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_stream_info.h"
 #include "media/base/byte_queue.h"
 #include "media/formats/mp2t/es_parser.h"
 namespace media {
 class AudioTimestampHelper;
 class BitReader;
-class StreamParserBuffer;
 }
 namespace media {
@@ -27,23 +25,24 @@ namespace mp2t {
 class EsParserAdts : public EsParser {
  public:
-  typedef base::Callback<void(const AudioDecoderConfig&)> NewAudioConfigCB;
+  typedef base::Callback<void(scoped_refptr<AudioStreamInfo>&)> NewAudioConfigCB;
-  EsParserAdts(const NewAudioConfigCB& new_audio_config_cb,
-               const EmitBufferCB& emit_buffer_cb,
+  EsParserAdts(uint32 track_id,
+               const NewAudioConfigCB& new_audio_config_cb,
+               const EmitSampleCB& emit_sample_cb,
                bool sbr_in_mimetype);
   virtual ~EsParserAdts();
   // EsParser implementation.
   virtual bool Parse(const uint8* buf, int size,
-                     base::TimeDelta pts,
-                     base::TimeDelta dts) OVERRIDE;
+                     int64 pts,
+                     int64 dts) OVERRIDE;
   virtual void Flush() OVERRIDE;
   virtual void Reset() OVERRIDE;
  private:
   // Used to link a PTS with a byte position in the ES stream.
-  typedef std::pair<int, base::TimeDelta> EsPts;
+  typedef std::pair<int, int64> EsPts;
   typedef std::list<EsPts> EsPtsList;
   // Signal any audio configuration change (if any).
@@ -58,7 +57,7 @@ class EsParserAdts : public EsParser {
   // - to signal a new audio configuration,
   // - to send ES buffers.
   NewAudioConfigCB new_audio_config_cb_;
-  EmitBufferCB emit_buffer_cb_;
+  EmitSampleCB emit_sample_cb_;
   // True when AAC SBR extension is signalled in the mimetype
   // (mp4a.40.5 in the codecs parameter).
@@ -73,8 +72,7 @@ class EsParserAdts : public EsParser {
   // Interpolated PTS for frames that don't have one.
   scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
-  // Last audio config.
-  AudioDecoderConfig last_audio_decoder_config_;
+  scoped_refptr<AudioStreamInfo> last_audio_decoder_config_;
   DISALLOW_COPY_AND_ASSIGN(EsParserAdts);
 };
@@ -83,4 +81,3 @@ class EsParserAdts : public EsParser {
 }  // namespace media
 #endif

media/formats/mp2t/mp2t.gyp Normal file

@@ -0,0 +1,43 @@
# Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
{
  'variables': {
    # Compile as chromium code to enable warnings and warnings-as-errors.
    'chromium_code': 1,
  },
  'target_defaults': {
    'include_dirs': [
      '../../..',
    ],
  },
  'targets': [
    {
      'target_name': 'mp2t',
      'type': '<(component)',
      'sources': [
        'es_parser.h',
        'es_parser_adts.cc',
        'es_parser_adts.h',
      ],
      'dependencies': [
        '../../base/media_base.gyp:base',
      ],
    },
    {
      'target_name': 'mp2t_unittest',
      'type': '<(gtest_target_type)',
      'sources': [
      ],
      'dependencies': [
        '../../../testing/gtest.gyp:gtest',
        '../../../testing/gmock.gyp:gmock',
        '../../test/media_test.gyp:media_test_support',
        'mp2t',
      ]
    },
  ],
}

media/formats/mp2t/mp2t_common.h

@@ -19,3 +19,8 @@
 #endif
+
+namespace media {
+const uint32 kMpeg2Timescale = 90000;
+}  // namespace media
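A quick arithmetic check (standalone, not part of the commit) of why a shared 90 kHz timescale still needs the frame-accumulating helper: the MPEG-2 clock divides evenly into 48 kHz audio but not into 44.1 kHz audio, so per-frame durations cannot simply be added as fixed constants.

#include <cstdio>

int main() {
  const int kMpeg2Timescale = 90000;      // constant introduced above
  const int kSamplesPerAACFrame = 1024;   // from adts_constants.h below
  std::printf("48 kHz AAC frame:   %.4f ticks\n",
              kSamplesPerAACFrame * 1.0 * kMpeg2Timescale / 48000);  // 1920.0000
  std::printf("44.1 kHz AAC frame: %.4f ticks\n",
              kSamplesPerAACFrame * 1.0 * kMpeg2Timescale / 44100);  // ~2089.7959
  return 0;
}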

media/formats/mpeg/adts_constants.cc

@@ -17,11 +17,8 @@ const size_t kADTSFrequencyTableSize = arraysize(kADTSFrequencyTable);
 // The following conversion table is extracted from ISO 14496 Part 3 -
 // Table 1.17 - Channel Configuration.
-const media::ChannelLayout kADTSChannelLayoutTable[] = {
-    media::CHANNEL_LAYOUT_NONE, media::CHANNEL_LAYOUT_MONO,
-    media::CHANNEL_LAYOUT_STEREO, media::CHANNEL_LAYOUT_SURROUND,
-    media::CHANNEL_LAYOUT_4_0, media::CHANNEL_LAYOUT_5_0_BACK,
-    media::CHANNEL_LAYOUT_5_1_BACK, media::CHANNEL_LAYOUT_7_1};
+const int kADTSChannelLayoutTable[] = { 0, 1, 2, 2, 4, 5, 6, 8 };
 const size_t kADTSChannelLayoutTableSize = arraysize(kADTSChannelLayoutTable);
 }  // namespace media
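Since the table now stores plain channel counts rather than ChannelLayout enums, a caller can map the ADTS channel_configuration field straight to a count. A minimal sketch (not part of the commit); only the table and its size constant are taken from this file:

#include <cstddef>
#include "media/formats/mpeg/adts_constants.h"

namespace media {

// Returns false for out-of-range values; otherwise writes the channel count
// for the given ADTS channel_configuration.
bool ChannelConfigToCount(size_t channel_configuration, int* num_channels) {
  if (channel_configuration >= kADTSChannelLayoutTableSize)
    return false;
  *num_channels = kADTSChannelLayoutTable[channel_configuration];
  return true;
}

}  // namespace media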

media/formats/mpeg/adts_constants.h

@@ -7,9 +7,6 @@
 #include <stddef.h>
-#include "media/base/channel_layout.h"
-#include "media/base/media_export.h"
 namespace media {
 enum {
@@ -17,11 +14,11 @@ enum {
   kSamplesPerAACFrame = 1024,
 };
-MEDIA_EXPORT extern const int kADTSFrequencyTable[];
-MEDIA_EXPORT extern const size_t kADTSFrequencyTableSize;
-MEDIA_EXPORT extern const media::ChannelLayout kADTSChannelLayoutTable[];
-MEDIA_EXPORT extern const size_t kADTSChannelLayoutTableSize;
+extern const int kADTSFrequencyTable[];
+extern const size_t kADTSFrequencyTableSize;
+extern const int kADTSChannelLayoutTable[];
+extern const size_t kADTSChannelLayoutTableSize;
 }  // namespace media

media/formats/mpeg/mpeg.gyp Normal file

@@ -0,0 +1,39 @@
# Copyright 2014 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
{
  'variables': {
    # Compile as chromium code to enable warnings and warnings-as-errors.
    'chromium_code': 1,
  },
  'target_defaults': {
    'include_dirs': [
      '../../..',
    ],
  },
  'targets': [
    {
      'target_name': 'mpeg',
      'type': '<(component)',
      'sources': [
        'adts_constants.cc',
        'adts_constants.h',
      ],
    },
    {
      'target_name': 'mpeg_unittest',
      'type': '<(gtest_target_type)',
      'sources': [
      ],
      'dependencies': [
        '../../../testing/gtest.gyp:gtest',
        '../../../testing/gmock.gyp:gmock',
        '../../test/media_test.gyp:media_test_support',
        'mpeg',
      ]
    },
  ],
}