Modified audio elementary stream parsing to work with packager remux.
Change-Id: Ice95102640e5cbb0382dc4c604c0af013103da99

parent 30b51506c4
commit a7c91ca7dd
@@ -1,4 +1,5 @@
+*.pyc
 *~
 .cproject
 .project
 .pydevproject
@@ -5,71 +5,72 @@
 #include "media/base/audio_timestamp_helper.h"
 
 #include "base/logging.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp.h"
 
 namespace media {
 
-AudioTimestampHelper::AudioTimestampHelper(int samples_per_second)
-    : base_timestamp_(kNoTimestamp()),
+AudioTimestampHelper::AudioTimestampHelper(uint32 timescale,
+                                           uint32 samples_per_second)
+    : base_timestamp_(kNoTimestamp),
       frame_count_(0) {
-  DCHECK_GT(samples_per_second, 0);
+  DCHECK_GT(samples_per_second, 0u);
   double fps = samples_per_second;
-  microseconds_per_frame_ = base::Time::kMicrosecondsPerSecond / fps;
+  ticks_per_frame_ = timescale / fps;
 }
 
-void AudioTimestampHelper::SetBaseTimestamp(base::TimeDelta base_timestamp) {
+void AudioTimestampHelper::SetBaseTimestamp(int64 base_timestamp) {
   base_timestamp_ = base_timestamp;
   frame_count_ = 0;
 }
 
-base::TimeDelta AudioTimestampHelper::base_timestamp() const {
+int64 AudioTimestampHelper::base_timestamp() const {
   return base_timestamp_;
 }
 
-void AudioTimestampHelper::AddFrames(int frame_count) {
+void AudioTimestampHelper::AddFrames(int64 frame_count) {
   DCHECK_GE(frame_count, 0);
-  DCHECK(base_timestamp_ != kNoTimestamp());
+  DCHECK(base_timestamp_ != kNoTimestamp);
   frame_count_ += frame_count;
 }
 
-base::TimeDelta AudioTimestampHelper::GetTimestamp() const {
+int64 AudioTimestampHelper::GetTimestamp() const {
   return ComputeTimestamp(frame_count_);
 }
 
-base::TimeDelta AudioTimestampHelper::GetFrameDuration(int frame_count) const {
+int64 AudioTimestampHelper::GetFrameDuration(int64 frame_count) const {
   DCHECK_GE(frame_count, 0);
-  base::TimeDelta end_timestamp = ComputeTimestamp(frame_count_ + frame_count);
+  int64 end_timestamp = ComputeTimestamp(frame_count_ + frame_count);
   return end_timestamp - GetTimestamp();
 }
 
-int64 AudioTimestampHelper::GetFramesToTarget(base::TimeDelta target) const {
-  DCHECK(base_timestamp_ != kNoTimestamp());
+int64 AudioTimestampHelper::GetFramesToTarget(int64 target) const {
+  DCHECK(base_timestamp_ != kNoTimestamp);
   DCHECK(target >= base_timestamp_);
 
-  int64 delta_in_us = (target - GetTimestamp()).InMicroseconds();
-  if (delta_in_us == 0)
+  int64 delta_in_ticks = (target - GetTimestamp());
+  if (delta_in_ticks == 0)
     return 0;
 
   // Compute a timestamp relative to |base_timestamp_| since timestamps
   // created from |frame_count_| are computed relative to this base.
   // This ensures that the time to frame computation here is the proper inverse
   // of the frame to time computation in ComputeTimestamp().
-  base::TimeDelta delta_from_base = target - base_timestamp_;
+  int64 delta_from_base = target - base_timestamp_;
 
   // Compute frame count for the time delta. This computation rounds to
   // the nearest whole number of frames.
-  double threshold = microseconds_per_frame_ / 2;
+  double threshold = ticks_per_frame_ / 2;
   int64 target_frame_count =
-      (delta_from_base.InMicroseconds() + threshold) / microseconds_per_frame_;
+      (delta_from_base + threshold) / ticks_per_frame_;
   return target_frame_count - frame_count_;
 }
 
-base::TimeDelta AudioTimestampHelper::ComputeTimestamp(
+int64 AudioTimestampHelper::ComputeTimestamp(
     int64 frame_count) const {
   DCHECK_GE(frame_count, 0);
-  DCHECK(base_timestamp_ != kNoTimestamp());
-  double frames_us = microseconds_per_frame_ * frame_count;
-  return base_timestamp_ + base::TimeDelta::FromMicroseconds(frames_us);
+  DCHECK(base_timestamp_ != kNoTimestamp);
+  double frames_ticks = ticks_per_frame_ * frame_count;
+  return base_timestamp_ + frames_ticks;
 }
 
 }  // namespace media
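For reference, the helper now counts abstract timescale ticks instead of base::TimeDelta microseconds. A minimal standalone sketch of that arithmetic, with illustrative numbers only (a 90 kHz MPEG-2-style timescale and 44.1 kHz audio; it only mirrors the math above, it is not the class itself):

#include <cstdint>
#include <cstdio>

int main() {
  // Mirrors the new constructor: ticks_per_frame_ = timescale / samples_per_second.
  const uint32_t timescale = 90000;           // illustrative 90 kHz clock
  const uint32_t samples_per_second = 44100;  // illustrative sample rate
  const double ticks_per_frame =
      static_cast<double>(timescale) / samples_per_second;

  // Mirrors ComputeTimestamp(): base + ticks_per_frame * frame_count,
  // truncated when the double result is converted back to int64.
  const int64_t base_timestamp = 0;  // as set by SetBaseTimestamp(0)
  const int64_t frame_count = 1024;  // one AAC frame worth of samples
  const int64_t timestamp =
      static_cast<int64_t>(base_timestamp + ticks_per_frame * frame_count);

  // Prints 2089: 1024 samples at 44100 Hz expressed in 90 kHz ticks (~23.2 ms).
  std::printf("%lld ticks\n", static_cast<long long>(timestamp));
  return 0;
}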
@@ -5,8 +5,7 @@
 #ifndef MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
 #define MEDIA_BASE_AUDIO_TIMESTAMP_HELPER_H_
 
-#include "base/time/time.h"
-#include "media/base/media_export.h"
+#include "base/basictypes.h"
 
 namespace media {
@@ -25,41 +24,41 @@ namespace media {
 // values for samples added to the current timestamp. GetFramesToTarget()
 // determines the number of frames that need to be added/removed from the
 // accumulated frames to reach a target timestamp.
-class MEDIA_EXPORT AudioTimestampHelper {
+class AudioTimestampHelper {
  public:
-  explicit AudioTimestampHelper(int samples_per_second);
+  explicit AudioTimestampHelper(uint32 timescale, uint32 samples_per_second);
 
   // Sets the base timestamp to |base_timestamp| and the sets count to 0.
-  void SetBaseTimestamp(base::TimeDelta base_timestamp);
+  void SetBaseTimestamp(int64 base_timestamp);
 
-  base::TimeDelta base_timestamp() const;
+  int64 base_timestamp() const;
   int64 frame_count() const { return frame_count_; }
 
   // Adds |frame_count| to the frame counter.
   // Note: SetBaseTimestamp() must be called with a value other than
   // kNoTimestamp() before this method can be called.
-  void AddFrames(int frame_count);
+  void AddFrames(int64 frame_count);
 
   // Get the current timestamp. This value is computed from the base_timestamp()
   // and the number of sample frames that have been added so far.
-  base::TimeDelta GetTimestamp() const;
+  int64 GetTimestamp() const;
 
   // Gets the duration if |frame_count| frames were added to the current
   // timestamp reported by GetTimestamp(). This method ensures that
   // (GetTimestamp() + GetFrameDuration(n)) will equal the timestamp that
   // GetTimestamp() will return if AddFrames(n) is called.
-  base::TimeDelta GetFrameDuration(int frame_count) const;
+  int64 GetFrameDuration(int64 frame_count) const;
 
   // Returns the number of frames needed to reach the target timestamp.
   // Note: |target| must be >= |base_timestamp_|.
-  int64 GetFramesToTarget(base::TimeDelta target) const;
+  int64 GetFramesToTarget(int64 target) const;
 
  private:
-  base::TimeDelta ComputeTimestamp(int64 frame_count) const;
+  int64 ComputeTimestamp(int64 frame_count) const;
 
-  double microseconds_per_frame_;
+  double ticks_per_frame_;
 
-  base::TimeDelta base_timestamp_;
+  int64 base_timestamp_;
 
   // Number of frames accumulated by AddFrames() calls.
   int64 frame_count_;
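GetFramesToTarget() is the inverse mapping: it adds half a frame's worth of ticks before dividing, so it rounds to the nearest whole frame and undoes the truncation in ComputeTimestamp(). A standalone sketch under the same illustrative 90 kHz / 44.1 kHz assumptions as above:

#include <cstdint>
#include <cstdio>

int main() {
  const double ticks_per_frame = 90000.0 / 44100.0;  // ~2.0408 ticks per frame

  // Mirrors GetFramesToTarget(): round (target - base) to the nearest frame.
  const int64_t base_timestamp = 0;
  const int64_t target = 2089;  // the tick value computed for 1024 frames above
  const int64_t delta_from_base = target - base_timestamp;
  const double threshold = ticks_per_frame / 2;
  const int64_t target_frame_count =
      static_cast<int64_t>((delta_from_base + threshold) / ticks_per_frame);

  // Prints 1024: frames -> ticks -> frames round-trips exactly.
  std::printf("%lld frames\n", static_cast<long long>(target_frame_count));
  return 0;
}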
@@ -3,29 +3,29 @@
 // found in the LICENSE file.
 
 #include "media/base/audio_timestamp_helper.h"
-#include "media/base/buffers.h"
+#include "media/base/timestamp.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace media {
 
-static const int kDefaultSampleRate = 44100;
+static const uint32 kDefaultSampleRate = 44100;
+static const uint32 kTimescale = 1000000;
 
 class AudioTimestampHelperTest : public ::testing::Test {
  public:
-  AudioTimestampHelperTest() : helper_(kDefaultSampleRate) {
-    helper_.SetBaseTimestamp(base::TimeDelta());
+  AudioTimestampHelperTest() : helper_(kTimescale, kDefaultSampleRate) {
+    helper_.SetBaseTimestamp(0);
   }
 
   // Adds frames to the helper and returns the current timestamp in
   // microseconds.
   int64 AddFrames(int frames) {
     helper_.AddFrames(frames);
-    return helper_.GetTimestamp().InMicroseconds();
+    return helper_.GetTimestamp();
   }
 
   int64 FramesToTarget(int target_in_microseconds) {
-    return helper_.GetFramesToTarget(
-        base::TimeDelta::FromMicroseconds(target_in_microseconds));
+    return helper_.GetFramesToTarget(target_in_microseconds);
   }
 
   void TestGetFramesToTargetRange(int frame_count, int start, int end) {
@@ -42,7 +42,7 @@ class AudioTimestampHelperTest : public ::testing::Test {
 };
 
 TEST_F(AudioTimestampHelperTest, Basic) {
-  EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
+  EXPECT_EQ(0, helper_.GetTimestamp());
 
   // Verify that the output timestamp is always rounded down to the
   // nearest microsecond. 1 frame @ 44100 is ~22.67573 microseconds,
@@ -57,30 +57,30 @@ TEST_F(AudioTimestampHelperTest, Basic) {
 
   // Verify that adding frames one frame at a time matches the timestamp
   // returned if the same number of frames are added all at once.
-  base::TimeDelta timestamp_1 = helper_.GetTimestamp();
-  helper_.SetBaseTimestamp(kNoTimestamp());
-  EXPECT_TRUE(kNoTimestamp() == helper_.base_timestamp());
-  helper_.SetBaseTimestamp(base::TimeDelta());
-  EXPECT_EQ(0, helper_.GetTimestamp().InMicroseconds());
+  int64 timestamp_1 = helper_.GetTimestamp();
+  helper_.SetBaseTimestamp(kNoTimestamp);
+  EXPECT_TRUE(kNoTimestamp == helper_.base_timestamp());
+  helper_.SetBaseTimestamp(0);
+  EXPECT_EQ(0, helper_.GetTimestamp());
 
   helper_.AddFrames(5);
-  EXPECT_EQ(113, helper_.GetTimestamp().InMicroseconds());
+  EXPECT_EQ(113, helper_.GetTimestamp());
   EXPECT_TRUE(timestamp_1 == helper_.GetTimestamp());
 }
 
 
 TEST_F(AudioTimestampHelperTest, GetDuration) {
-  helper_.SetBaseTimestamp(base::TimeDelta::FromMicroseconds(100));
+  helper_.SetBaseTimestamp(100);
 
   int frame_count = 5;
   int64 expected_durations[] = { 113, 113, 114, 113, 113, 114 };
   for (size_t i = 0; i < arraysize(expected_durations); ++i) {
-    base::TimeDelta duration = helper_.GetFrameDuration(frame_count);
-    EXPECT_EQ(expected_durations[i], duration.InMicroseconds());
+    int64 duration = helper_.GetFrameDuration(frame_count);
+    EXPECT_EQ(expected_durations[i], duration);
 
-    base::TimeDelta timestamp_1 = helper_.GetTimestamp() + duration;
+    int64 timestamp_1 = helper_.GetTimestamp() + duration;
     helper_.AddFrames(frame_count);
-    base::TimeDelta timestamp_2 = helper_.GetTimestamp();
+    int64 timestamp_2 = helper_.GetTimestamp();
     EXPECT_TRUE(timestamp_1 == timestamp_2);
   }
 }
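The alternating 113/113/114 values in expected_durations come straight out of the truncation in ComputeTimestamp(): five frames at 44100 Hz is about 113.38 ticks in the 1 MHz test timescale, and the fractional part accumulates until it spills into an extra tick. A standalone sketch that reproduces the sequence under those assumptions:

#include <cstdint>
#include <cstdio>

int main() {
  const double ticks_per_frame = 1000000.0 / 44100.0;  // kTimescale / kDefaultSampleRate
  const int64_t base_timestamp = 100;                  // as in SetBaseTimestamp(100)

  int64_t frame_count = 0;
  for (int i = 0; i < 6; ++i) {
    // GetTimestamp() and ComputeTimestamp(frame_count + 5), both truncating.
    const int64_t now =
        static_cast<int64_t>(base_timestamp + ticks_per_frame * frame_count);
    const int64_t end =
        static_cast<int64_t>(base_timestamp + ticks_per_frame * (frame_count + 5));
    std::printf("%lld ", static_cast<long long>(end - now));  // GetFrameDuration(5)
    frame_count += 5;                                         // AddFrames(5)
  }
  std::printf("\n");  // prints: 113 113 114 113 113 114
  return 0;
}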
|
@ -55,6 +55,8 @@
|
|||
'aes_encryptor.h',
|
||||
'audio_stream_info.cc',
|
||||
'audio_stream_info.h',
|
||||
'audio_timestamp_helper.cc',
|
||||
'audio_timestamp_helper.h',
|
||||
'bit_reader.cc',
|
||||
'bit_reader.h',
|
||||
'buffer_reader.cc',
|
||||
|
@@ -94,6 +96,7 @@
         'stream_info.cc',
         'stream_info.h',
         'text_track.h',
+        'timestamp.h',
         'video_stream_info.cc',
         'video_stream_info.h',
         'widevine_encryptor_source.cc',
@@ -111,6 +114,7 @@
       'type': '<(gtest_target_type)',
       'sources': [
         'aes_encryptor_unittest.cc',
+        'audio_timestamp_helper_unittest.cc',
         'bit_reader_unittest.cc',
         'buffer_writer_unittest.cc',
         'closure_thread_unittest.cc',
@@ -0,0 +1,20 @@
+// Copyright 2014 Google Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file or at
+// https://developers.google.com/open-source/licenses/bsd
+
+
+#ifndef MEDIA_BASE_TIMESTAMP_H_
+#define MEDIA_BASE_TIMESTAMP_H_
+
+#include "base/basictypes.h"
+
+namespace media {
+
+const int64 kNoTimestamp = kint64min;
+const int64 kInfiniteDuration = kint64max;
+
+}  // namespace media
+
+#endif  // MEDIA_BASE_TIMESTAMP_H_
@@ -8,32 +8,34 @@
 #include "base/basictypes.h"
 #include "base/callback.h"
 #include "base/memory/ref_counted.h"
-#include "base/time/time.h"
 
 namespace media {
 
-class StreamParserBuffer;
+class MediaSample;
 
 namespace mp2t {
 
 class EsParser {
  public:
-  typedef base::Callback<void(scoped_refptr<StreamParserBuffer>)> EmitBufferCB;
+  typedef base::Callback<void(scoped_refptr<MediaSample>)> EmitSampleCB;
 
-  EsParser() {}
+  EsParser(uint32 track_id) : track_id_(track_id) {}
   virtual ~EsParser() {}
 
   // ES parsing.
   // Should use kNoTimestamp when a timestamp is not valid.
-  virtual bool Parse(const uint8* buf, int size,
-                     base::TimeDelta pts,
-                     base::TimeDelta dts) = 0;
+  virtual bool Parse(const uint8* buf, int size, int64 pts, int64 dts) = 0;
 
   // Flush any pending buffer.
   virtual void Flush() = 0;
 
   // Reset the state of the ES parser.
   virtual void Reset() = 0;
 
+  uint32 track_id() { return track_id_; }
+
+ private:
+  uint32 track_id_;
 };
 
 }  // namespace mp2t
@@ -11,9 +11,8 @@
 #include "base/strings/string_number_conversions.h"
 #include "media/base/audio_timestamp_helper.h"
 #include "media/base/bit_reader.h"
-#include "media/base/buffers.h"
-#include "media/base/channel_layout.h"
-#include "media/base/stream_parser_buffer.h"
+#include "media/base/media_sample.h"
+#include "media/base/timestamp.h"
 #include "media/formats/mp2t/mp2t_common.h"
 #include "media/formats/mpeg/adts_constants.h"
 
@@ -99,26 +98,26 @@ static bool LookForSyncWord(const uint8* raw_es, int raw_es_size,
 namespace mp2t {
 
 EsParserAdts::EsParserAdts(
+    uint32 track_id,
     const NewAudioConfigCB& new_audio_config_cb,
-    const EmitBufferCB& emit_buffer_cb,
+    const EmitSampleCB& emit_sample_cb,
     bool sbr_in_mimetype)
-    : new_audio_config_cb_(new_audio_config_cb),
-      emit_buffer_cb_(emit_buffer_cb),
+    : EsParser(track_id),
+      new_audio_config_cb_(new_audio_config_cb),
+      emit_sample_cb_(emit_sample_cb),
       sbr_in_mimetype_(sbr_in_mimetype) {
 }
 
 EsParserAdts::~EsParserAdts() {
 }
 
-bool EsParserAdts::Parse(const uint8* buf, int size,
-                         base::TimeDelta pts,
-                         base::TimeDelta dts) {
+bool EsParserAdts::Parse(const uint8* buf, int size, int64 pts, int64 dts) {
   int raw_es_size;
   const uint8* raw_es;
 
   // The incoming PTS applies to the access unit that comes just after
   // the beginning of |buf|.
-  if (pts != kNoTimestamp()) {
+  if (pts != kNoTimestamp) {
     es_byte_queue_.Peek(&raw_es, &raw_es_size);
     pts_list_.push_back(EsPts(raw_es_size, pts));
   }
@@ -156,25 +155,22 @@ bool EsParserAdts::Parse(const uint8* buf, int size,
       pts_list_.pop_front();
     }
 
-    base::TimeDelta current_pts = audio_timestamp_helper_->GetTimestamp();
-    base::TimeDelta frame_duration =
+    int64 current_pts = audio_timestamp_helper_->GetTimestamp();
+    int64 frame_duration =
         audio_timestamp_helper_->GetFrameDuration(kSamplesPerAACFrame);
 
     // Emit an audio frame.
     bool is_key_frame = true;
 
-    // TODO(wolenetz/acolwell): Validate and use a common cross-parser TrackId
-    // type and allow multiple audio tracks. See https://crbug.com/341581.
-    scoped_refptr<StreamParserBuffer> stream_parser_buffer =
-        StreamParserBuffer::CopyFrom(
+    scoped_refptr<MediaSample> sample =
+        MediaSample::CopyFrom(
             &raw_es[es_position],
             frame_size,
-            is_key_frame,
-            DemuxerStream::AUDIO, 0);
-    stream_parser_buffer->SetDecodeTimestamp(current_pts);
-    stream_parser_buffer->set_timestamp(current_pts);
-    stream_parser_buffer->set_duration(frame_duration);
-    emit_buffer_cb_.Run(stream_parser_buffer);
+            is_key_frame);
+    sample->set_pts(current_pts);
+    sample->set_dts(current_pts);
+    sample->set_duration(frame_duration);
+    emit_sample_cb_.Run(sample);
 
     // Update the PTS of the next frame.
     audio_timestamp_helper_->AddFrames(kSamplesPerAACFrame);
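Each emitted sample advances the timestamp helper by kSamplesPerAACFrame (1024) input frames, so consecutive AAC frames receive PTS values roughly 2089-2090 ticks apart once the helper runs on the 90 kHz kMpeg2Timescale. A standalone sketch of that interpolation, assuming an illustrative 44.1 kHz input and a PTS base of 0:

#include <cstdint>
#include <cstdio>

int main() {
  const double ticks_per_frame = 90000.0 / 44100.0;  // kMpeg2Timescale / sample rate
  const int64_t base_pts = 0;
  const int64_t samples_per_aac_frame = 1024;        // kSamplesPerAACFrame

  int64_t frame_count = 0;
  for (int i = 0; i < 4; ++i) {
    const int64_t pts =
        static_cast<int64_t>(base_pts + ticks_per_frame * frame_count);
    const int64_t next = static_cast<int64_t>(
        base_pts + ticks_per_frame * (frame_count + samples_per_aac_frame));
    // The values the parser would assign to each sample's pts/dts and duration.
    std::printf("frame %d: pts=%lld duration=%lld\n", i,
                static_cast<long long>(pts), static_cast<long long>(next - pts));
    frame_count += samples_per_aac_frame;  // AddFrames(kSamplesPerAACFrame)
  }
  return 0;
}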
@@ -195,10 +191,16 @@ void EsParserAdts::Flush() {
 void EsParserAdts::Reset() {
   es_byte_queue_.Reset();
   pts_list_.clear();
-  last_audio_decoder_config_ = AudioDecoderConfig();
+  last_audio_decoder_config_ = scoped_refptr<AudioStreamInfo>();
 }
 
 bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
+  if (last_audio_decoder_config_) {
+    // Varying audio configurations currently not supported. Just assume that
+    // the audio configuration has not changed.
+    return true;
+  }
+
   size_t frequency_index = ExtractAdtsFrequencyIndex(adts_header);
   if (frequency_index >= kADTSFrequencyTableSize) {
     // Frequency index 13 & 14 are reserved
@@ -227,33 +229,38 @@ bool EsParserAdts::UpdateAudioConfiguration(const uint8* adts_header) {
           ? std::min(2 * samples_per_second, 48000)
           : samples_per_second;
 
-  AudioDecoderConfig audio_decoder_config(
+  last_audio_decoder_config_ = scoped_refptr<AudioStreamInfo>
+      (new AudioStreamInfo(
+          track_id(),
+          kMpeg2Timescale,
+          kInfiniteDuration,
           kCodecAAC,
-          kSampleFormatS16,
+          std::string(),  // TODO(tinskip): calculate codec string.
+          std::string(),
+          16,
           kADTSChannelLayoutTable[channel_configuration],
-          extended_samples_per_second,
-          NULL, 0,
-          false);
+          samples_per_second,
+          NULL,  // TODO(tinskip): calculate AudioSpecificConfig.
+          0,
+          false));
 
-  if (!audio_decoder_config.Matches(last_audio_decoder_config_)) {
   DVLOG(1) << "Sampling frequency: " << samples_per_second;
   DVLOG(1) << "Extended sampling frequency: " << extended_samples_per_second;
   DVLOG(1) << "Channel config: " << channel_configuration;
   DVLOG(1) << "Adts profile: " << adts_profile;
-  // Reset the timestamp helper to use a new time scale.
+  // Reset the timestamp helper to use a new sampling frequency.
   if (audio_timestamp_helper_) {
-    base::TimeDelta base_timestamp = audio_timestamp_helper_->GetTimestamp();
+    int64 base_timestamp = audio_timestamp_helper_->GetTimestamp();
     audio_timestamp_helper_.reset(
-      new AudioTimestampHelper(samples_per_second));
+      new AudioTimestampHelper(kMpeg2Timescale, samples_per_second));
     audio_timestamp_helper_->SetBaseTimestamp(base_timestamp);
   } else {
     audio_timestamp_helper_.reset(
-      new AudioTimestampHelper(samples_per_second));
+      new AudioTimestampHelper(kMpeg2Timescale, samples_per_second));
   }
 
   // Audio config notification.
-  last_audio_decoder_config_ = audio_decoder_config;
-  new_audio_config_cb_.Run(audio_decoder_config);
-  }
+  new_audio_config_cb_.Run(last_audio_decoder_config_);
 
   return true;
 }
@@ -273,4 +280,3 @@ void EsParserAdts::DiscardEs(int nbytes) {
 
 }  // namespace mp2t
 }  // namespace media
-
@@ -11,15 +11,13 @@
 #include "base/callback.h"
 #include "base/compiler_specific.h"
 #include "base/memory/scoped_ptr.h"
-#include "base/time/time.h"
-#include "media/base/audio_decoder_config.h"
+#include "media/base/audio_stream_info.h"
 #include "media/base/byte_queue.h"
 #include "media/formats/mp2t/es_parser.h"
 
 namespace media {
 class AudioTimestampHelper;
 class BitReader;
-class StreamParserBuffer;
 }
 
 namespace media {
@@ -27,23 +25,24 @@ namespace mp2t {
 
 class EsParserAdts : public EsParser {
  public:
-  typedef base::Callback<void(const AudioDecoderConfig&)> NewAudioConfigCB;
+  typedef base::Callback<void(scoped_refptr<AudioStreamInfo>&)> NewAudioConfigCB;
 
-  EsParserAdts(const NewAudioConfigCB& new_audio_config_cb,
-               const EmitBufferCB& emit_buffer_cb,
+  EsParserAdts(uint32 track_id,
+               const NewAudioConfigCB& new_audio_config_cb,
+               const EmitSampleCB& emit_sample_cb,
                bool sbr_in_mimetype);
   virtual ~EsParserAdts();
 
   // EsParser implementation.
   virtual bool Parse(const uint8* buf, int size,
-                     base::TimeDelta pts,
-                     base::TimeDelta dts) OVERRIDE;
+                     int64 pts,
+                     int64 dts) OVERRIDE;
   virtual void Flush() OVERRIDE;
   virtual void Reset() OVERRIDE;
 
  private:
   // Used to link a PTS with a byte position in the ES stream.
-  typedef std::pair<int, base::TimeDelta> EsPts;
+  typedef std::pair<int, int64> EsPts;
   typedef std::list<EsPts> EsPtsList;
 
   // Signal any audio configuration change (if any).
@@ -58,7 +57,7 @@ class EsParserAdts : public EsParser {
   // - to signal a new audio configuration,
   // - to send ES buffers.
   NewAudioConfigCB new_audio_config_cb_;
-  EmitBufferCB emit_buffer_cb_;
+  EmitSampleCB emit_sample_cb_;
 
   // True when AAC SBR extension is signalled in the mimetype
   // (mp4a.40.5 in the codecs parameter).
@@ -73,8 +72,7 @@ class EsParserAdts : public EsParser {
   // Interpolated PTS for frames that don't have one.
   scoped_ptr<AudioTimestampHelper> audio_timestamp_helper_;
 
   // Last audio config.
-  AudioDecoderConfig last_audio_decoder_config_;
+  scoped_refptr<AudioStreamInfo> last_audio_decoder_config_;
 
   DISALLOW_COPY_AND_ASSIGN(EsParserAdts);
 };
@@ -83,4 +81,3 @@ class EsParserAdts : public EsParser {
 }  // namespace media
 
 #endif
-
@@ -0,0 +1,43 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+{
+  'variables': {
+    # Compile as chromium code to enable warnings and warnings-as-errors.
+    'chromium_code': 1,
+  },
+  'target_defaults': {
+    'include_dirs': [
+      '../../..',
+    ],
+  },
+  'targets': [
+    {
+      'target_name': 'mp2t',
+      'type': '<(component)',
+      'sources': [
+        'es_parser.h',
+        'es_parser_adts.cc',
+        'es_parser_adts.h',
+      ],
+      'dependencies': [
+        '../../base/media_base.gyp:base',
+      ],
+    },
+    {
+      'target_name': 'mp2t_unittest',
+      'type': '<(gtest_target_type)',
+      'sources': [
+      ],
+      'dependencies': [
+        '../../../testing/gtest.gyp:gtest',
+        '../../../testing/gmock.gyp:gmock',
+        '../../test/media_test.gyp:media_test_support',
+        'mp2t',
+      ]
+    },
+  ],
+}
@@ -19,3 +19,8 @@
 
 #endif
 
+namespace media {
+
+const uint32 kMpeg2Timescale = 90000;
+
+}  // namespace media
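kMpeg2Timescale pins the parser's output to the standard 90 kHz MPEG-2 transport stream clock, so downstream code recovers wall-clock time by dividing ticks by 90000. A trivial standalone sketch of that conversion (the tick value is illustrative):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kMpeg2Timescale = 90000;  // 90 kHz MPEG-2 TS clock
  const int64_t pts_ticks = 2089;          // roughly one 44.1 kHz AAC frame

  const double seconds = static_cast<double>(pts_ticks) / kMpeg2Timescale;
  std::printf("%lld ticks = %.3f ms\n",
              static_cast<long long>(pts_ticks), seconds * 1000.0);  // ~23.211 ms
  return 0;
}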
@@ -17,11 +17,8 @@ const size_t kADTSFrequencyTableSize = arraysize(kADTSFrequencyTable);
 
 // The following conversion table is extracted from ISO 14496 Part 3 -
 // Table 1.17 - Channel Configuration.
-const media::ChannelLayout kADTSChannelLayoutTable[] = {
-    media::CHANNEL_LAYOUT_NONE, media::CHANNEL_LAYOUT_MONO,
-    media::CHANNEL_LAYOUT_STEREO, media::CHANNEL_LAYOUT_SURROUND,
-    media::CHANNEL_LAYOUT_4_0, media::CHANNEL_LAYOUT_5_0_BACK,
-    media::CHANNEL_LAYOUT_5_1_BACK, media::CHANNEL_LAYOUT_7_1};
+const int kADTSChannelLayoutTable[] = {
+    0, 1, 2, 2, 4, 5, 6, 8 };
 const size_t kADTSChannelLayoutTableSize = arraysize(kADTSChannelLayoutTable);
 
 }  // namespace media
@@ -7,9 +7,6 @@
 
 #include <stddef.h>
 
-#include "media/base/channel_layout.h"
-#include "media/base/media_export.h"
-
 namespace media {
 
 enum {
@@ -17,11 +14,11 @@ enum {
   kSamplesPerAACFrame = 1024,
 };
 
-MEDIA_EXPORT extern const int kADTSFrequencyTable[];
-MEDIA_EXPORT extern const size_t kADTSFrequencyTableSize;
+extern const int kADTSFrequencyTable[];
+extern const size_t kADTSFrequencyTableSize;
 
-MEDIA_EXPORT extern const media::ChannelLayout kADTSChannelLayoutTable[];
-MEDIA_EXPORT extern const size_t kADTSChannelLayoutTableSize;
+extern const int kADTSChannelLayoutTable[];
+extern const size_t kADTSChannelLayoutTableSize;
 
 }  // namespace media
@@ -0,0 +1,39 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file or at
+# https://developers.google.com/open-source/licenses/bsd
+
+{
+  'variables': {
+    # Compile as chromium code to enable warnings and warnings-as-errors.
+    'chromium_code': 1,
+  },
+  'target_defaults': {
+    'include_dirs': [
+      '../../..',
+    ],
+  },
+  'targets': [
+    {
+      'target_name': 'mpeg',
+      'type': '<(component)',
+      'sources': [
+        'adts_constants.cc',
+        'adts_constants.h',
+      ],
+    },
+    {
+      'target_name': 'mpeg_unittest',
+      'type': '<(gtest_target_type)',
+      'sources': [
+      ],
+      'dependencies': [
+        '../../../testing/gtest.gyp:gtest',
+        '../../../testing/gmock.gyp:gmock',
+        '../../test/media_test.gyp:media_test_support',
+        'mpeg',
+      ]
+    },
+  ],
+}