Made StreamData Have Const Pointers

In preparation for upcoming trick play changes, make all messages copy-on-write:
the pointers inside StreamData are now pointers-to-const, so when the same message
is sent to multiple handlers, one handler cannot change the data another handler
is using. A handler that needs to modify a message makes its own copy (for example
via StreamInfo::Clone() or MediaSample::CopyFrom()) and dispatches the copy; a
minimal sketch of the pattern follows the commit metadata.

Change-Id: I554166ca11c532412e4dfced5603972ca24dc2bb
Aaron Vaage 2017-09-12 10:24:24 -07:00
parent bc903d2d83
commit 16eff80497
42 changed files with 691 additions and 476 deletions
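
To make the intent of the diff easier to follow, here is a minimal sketch of the
copy-on-write pattern this commit sets up. It is not code from the commit: the
helper name MakeModifiedCopy is hypothetical, while StreamData,
StreamData::FromStreamInfo(), StreamInfo::Clone() and set_is_encrypted() are all
introduced or used in the changes below.

    // Hypothetical helper: build a modified copy of a message without touching
    // the original, which other handlers may still be reading.
    std::unique_ptr<StreamData> MakeModifiedCopy(const StreamData& message) {
      // message.stream_info is a shared_ptr<const StreamInfo>, so it cannot be
      // mutated in place; Clone() returns a private copy of the concrete type.
      std::shared_ptr<StreamInfo> copy = message.stream_info->Clone();
      copy->set_is_encrypted(true);  // example mutation, applied to the copy only
      // Wrap the copy in a fresh message; the original StreamData and every
      // handler holding it are left untouched.
      return StreamData::FromStreamInfo(message.stream_index, std::move(copy));
    }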

View File

@@ -92,6 +92,10 @@ std::string AudioStreamInfo::ToString() const {
   return str;
 }
 
+std::unique_ptr<StreamInfo> AudioStreamInfo::Clone() const {
+  return std::unique_ptr<StreamInfo>(new AudioStreamInfo(*this));
+}
+
 std::string AudioStreamInfo::GetCodecString(Codec codec,
                                             uint8_t audio_object_type) {
   switch (codec) {

View File

@@ -33,6 +33,7 @@ class AudioStreamInfo : public StreamInfo {
   /// @{
   bool IsValidConfig() const override;
   std::string ToString() const override;
+  std::unique_ptr<StreamInfo> Clone() const override;
   /// @}
 
   uint8_t sample_bits() const { return sample_bits_; }

View File

@@ -48,12 +48,66 @@ struct StreamData {
   size_t stream_index = static_cast<size_t>(-1);
   StreamDataType stream_data_type = StreamDataType::kUnknown;
 
-  std::shared_ptr<PeriodInfo> period_info;
-  std::shared_ptr<StreamInfo> stream_info;
-  std::shared_ptr<MediaSample> media_sample;
-  std::shared_ptr<TextSample> text_sample;
-  std::shared_ptr<MediaEvent> media_event;
-  std::shared_ptr<SegmentInfo> segment_info;
+  std::shared_ptr<const PeriodInfo> period_info;
+  std::shared_ptr<const StreamInfo> stream_info;
+  std::shared_ptr<const MediaSample> media_sample;
+  std::shared_ptr<const TextSample> text_sample;
+  std::shared_ptr<const MediaEvent> media_event;
+  std::shared_ptr<const SegmentInfo> segment_info;
+
+  static std::unique_ptr<StreamData> FromPeriodInfo(
+      size_t stream_index, std::shared_ptr<const PeriodInfo> period_info) {
+    std::unique_ptr<StreamData> stream_data(new StreamData);
+    stream_data->stream_index = stream_index;
+    stream_data->stream_data_type = StreamDataType::kPeriodInfo;
+    stream_data->period_info = std::move(period_info);
+    return stream_data;
+  }
+
+  static std::unique_ptr<StreamData> FromStreamInfo(
+      size_t stream_index, std::shared_ptr<const StreamInfo> stream_info) {
+    std::unique_ptr<StreamData> stream_data(new StreamData);
+    stream_data->stream_index = stream_index;
+    stream_data->stream_data_type = StreamDataType::kStreamInfo;
+    stream_data->stream_info = std::move(stream_info);
+    return stream_data;
+  }
+
+  static std::unique_ptr<StreamData> FromMediaSample(
+      size_t stream_index, std::shared_ptr<const MediaSample> media_sample) {
+    std::unique_ptr<StreamData> stream_data(new StreamData);
+    stream_data->stream_index = stream_index;
+    stream_data->stream_data_type = StreamDataType::kMediaSample;
+    stream_data->media_sample = std::move(media_sample);
+    return stream_data;
+  }
+
+  static std::unique_ptr<StreamData> FromTextSample(
+      size_t stream_index, std::shared_ptr<const TextSample> text_sample) {
+    std::unique_ptr<StreamData> stream_data(new StreamData);
+    stream_data->stream_index = stream_index;
+    stream_data->stream_data_type = StreamDataType::kTextSample;
+    stream_data->text_sample = std::move(text_sample);
+    return stream_data;
+  }
+
+  static std::unique_ptr<StreamData> FromMediaEvent(
+      size_t stream_index, std::shared_ptr<const MediaEvent> media_event) {
+    std::unique_ptr<StreamData> stream_data(new StreamData);
+    stream_data->stream_index = stream_index;
+    stream_data->stream_data_type = StreamDataType::kMediaEvent;
+    stream_data->media_event = std::move(media_event);
+    return stream_data;
+  }
+
+  static std::unique_ptr<StreamData> FromSegmentInfo(
+      size_t stream_index, std::shared_ptr<const SegmentInfo> segment_info) {
+    std::unique_ptr<StreamData> stream_data(new StreamData);
+    stream_data->stream_index = stream_index;
+    stream_data->stream_data_type = StreamDataType::kSegmentInfo;
+    stream_data->segment_info = std::move(segment_info);
+    return stream_data;
+  }
 };
 
 /// MediaHandler is the base media processing unit. Media handlers transform
@@ -114,64 +168,40 @@ class MediaHandler {
   Status Dispatch(std::unique_ptr<StreamData> stream_data);
 
   /// Dispatch the period info to downstream handlers.
-  Status DispatchPeriodInfo(size_t stream_index,
-                            std::shared_ptr<PeriodInfo> period_info) {
-    std::unique_ptr<StreamData> stream_data(new StreamData);
-    stream_data->stream_index = stream_index;
-    stream_data->stream_data_type = StreamDataType::kPeriodInfo;
-    stream_data->period_info = std::move(period_info);
-    return Dispatch(std::move(stream_data));
+  Status DispatchPeriodInfo(
+      size_t stream_index, std::shared_ptr<const PeriodInfo> period_info) {
+    return Dispatch(StreamData::FromPeriodInfo(stream_index, period_info));
   }
 
   /// Dispatch the stream info to downstream handlers.
-  Status DispatchStreamInfo(size_t stream_index,
-                            std::shared_ptr<StreamInfo> stream_info) {
-    std::unique_ptr<StreamData> stream_data(new StreamData);
-    stream_data->stream_index = stream_index;
-    stream_data->stream_data_type = StreamDataType::kStreamInfo;
-    stream_data->stream_info = std::move(stream_info);
-    return Dispatch(std::move(stream_data));
+  Status DispatchStreamInfo(
+      size_t stream_index, std::shared_ptr<const StreamInfo> stream_info) {
+    return Dispatch(StreamData::FromStreamInfo(stream_index, stream_info));
   }
 
   /// Dispatch the media sample to downstream handlers.
-  Status DispatchMediaSample(size_t stream_index,
-                             std::shared_ptr<MediaSample> media_sample) {
-    std::unique_ptr<StreamData> stream_data(new StreamData);
-    stream_data->stream_index = stream_index;
-    stream_data->stream_data_type = StreamDataType::kMediaSample;
-    stream_data->media_sample = std::move(media_sample);
-    return Dispatch(std::move(stream_data));
+  Status DispatchMediaSample(
+      size_t stream_index, std::shared_ptr<const MediaSample> media_sample) {
+    return Dispatch(StreamData::FromMediaSample(stream_index, media_sample));
   }
 
   /// Dispatch the text sample to downstream handlers.
   // DispatchTextSample should only be overridden for testing.
-  Status DispatchTextSample(size_t stream_index,
-                            std::shared_ptr<TextSample> text_sample) {
-    std::unique_ptr<StreamData> stream_data(new StreamData);
-    stream_data->stream_index = stream_index;
-    stream_data->stream_data_type = StreamDataType::kTextSample;
-    stream_data->text_sample = std::move(text_sample);
-    return Dispatch(std::move(stream_data));
+  Status DispatchTextSample(
+      size_t stream_index, std::shared_ptr<const TextSample> text_sample) {
+    return Dispatch(StreamData::FromTextSample(stream_index, text_sample));
   }
 
   /// Dispatch the media event to downstream handlers.
-  Status DispatchMediaEvent(size_t stream_index,
-                            std::shared_ptr<MediaEvent> media_event) {
-    std::unique_ptr<StreamData> stream_data(new StreamData);
-    stream_data->stream_index = stream_index;
-    stream_data->stream_data_type = StreamDataType::kMediaEvent;
-    stream_data->media_event = std::move(media_event);
-    return Dispatch(std::move(stream_data));
+  Status DispatchMediaEvent(
+      size_t stream_index, std::shared_ptr<const MediaEvent> media_event) {
+    return Dispatch(StreamData::FromMediaEvent(stream_index, media_event));
   }
 
   /// Dispatch the segment info to downstream handlers.
-  Status DispatchSegmentInfo(size_t stream_index,
-                             std::shared_ptr<SegmentInfo> segment_info) {
-    std::unique_ptr<StreamData> stream_data(new StreamData);
-    stream_data->stream_index = stream_index;
-    stream_data->stream_data_type = StreamDataType::kSegmentInfo;
-    stream_data->segment_info = std::move(segment_info);
-    return Dispatch(std::move(stream_data));
+  Status DispatchSegmentInfo(
+      size_t stream_index, std::shared_ptr<const SegmentInfo> segment_info) {
+    return Dispatch(StreamData::FromSegmentInfo(stream_index, segment_info));
   }
 
   /// Flush the downstream connected at the specified output stream index.

View File

@ -80,45 +80,86 @@ MediaHandlerTestBase::MediaHandlerTestBase()
: next_handler_(new FakeMediaHandler), : next_handler_(new FakeMediaHandler),
some_handler_(new FakeMediaHandler) {} some_handler_(new FakeMediaHandler) {}
std::unique_ptr<StreamData> MediaHandlerTestBase::GetStreamInfoStreamData( bool MediaHandlerTestBase::IsVideoCodec(Codec codec) const {
int stream_index, return codec >= kCodecVideo && codec < kCodecVideoMaxPlusOne;
Codec codec,
uint32_t time_scale) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kStreamInfo;
stream_data->stream_info = GetMockStreamInfo(codec, time_scale);
return stream_data;
} }
std::unique_ptr<StreamData> MediaHandlerTestBase::GetMediaSampleStreamData( std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetVideoStreamInfo(
int stream_index, uint32_t time_scale) const {
return GetVideoStreamInfo(time_scale, kCodecVP9, kWidth, kHeight);
}
std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetVideoStreamInfo(
uint32_t time_scale,
uint32_t width,
uint64_t height) const {
return GetVideoStreamInfo(time_scale, kCodecVP9, width, height);
}
std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetVideoStreamInfo(
uint32_t time_scale,
Codec codec) const {
return GetVideoStreamInfo(time_scale, codec, kWidth, kHeight);
}
std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetVideoStreamInfo(
uint32_t time_scale,
Codec codec,
uint32_t width,
uint64_t height) const {
return std::unique_ptr<VideoStreamInfo>(new VideoStreamInfo(
kTrackId, time_scale, kDuration, codec, H26xStreamFormat::kUnSpecified,
kCodecString, kCodecConfig, sizeof(kCodecConfig), width, height,
kPixelWidth, kPixelHeight, kTrickPlayFactor, kNaluLengthSize, kLanguage,
!kEncrypted));
}
std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetAudioStreamInfo(
uint32_t time_scale) const {
return GetAudioStreamInfo(time_scale, kCodecAAC);
}
std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetAudioStreamInfo(
uint32_t time_scale,
Codec codec) const {
return std::unique_ptr<AudioStreamInfo>(new AudioStreamInfo(
kTrackId, time_scale, kDuration, codec, kCodecString, kCodecConfig,
sizeof(kCodecConfig), kSampleBits, kNumChannels, kSamplingFrequency,
kSeekPrerollNs, kCodecDelayNs, kMaxBitrate, kAvgBitrate, kLanguage,
!kEncrypted));
}
std::unique_ptr<MediaSample> MediaHandlerTestBase::GetMediaSample(
int64_t timestamp, int64_t timestamp,
int64_t duration, int64_t duration,
bool is_keyframe) { bool is_keyframe) const {
std::unique_ptr<StreamData> stream_data(new StreamData); return GetMediaSample(timestamp, duration, is_keyframe, kData, sizeof(kData));
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kMediaSample;
stream_data->media_sample.reset(
new MediaSample(kData, sizeof(kData), nullptr, 0, is_keyframe));
stream_data->media_sample->set_dts(timestamp);
stream_data->media_sample->set_duration(duration);
return stream_data;
} }
std::unique_ptr<StreamData> MediaHandlerTestBase::GetSegmentInfoStreamData( std::unique_ptr<MediaSample> MediaHandlerTestBase::GetMediaSample(
int stream_index, int64_t timestamp,
int64_t duration,
bool is_keyframe,
const uint8_t* data,
size_t data_length) const {
std::unique_ptr<MediaSample> sample(
new MediaSample(data, data_length, nullptr, 0, is_keyframe));
sample->set_dts(timestamp);
sample->set_duration(duration);
return sample;
}
std::unique_ptr<SegmentInfo> MediaHandlerTestBase::GetSegmentInfo(
int64_t start_timestamp, int64_t start_timestamp,
int64_t duration, int64_t duration,
bool is_subsegment) { bool is_subsegment) const {
std::unique_ptr<StreamData> stream_data(new StreamData); std::unique_ptr<SegmentInfo> info(new SegmentInfo);
stream_data->stream_index = stream_index; info->start_timestamp = start_timestamp;
stream_data->stream_data_type = StreamDataType::kSegmentInfo; info->duration = duration;
stream_data->segment_info.reset(new SegmentInfo); info->is_subsegment = is_subsegment;
stream_data->segment_info->start_timestamp = start_timestamp;
stream_data->segment_info->duration = duration; return info;
stream_data->segment_info->is_subsegment = is_subsegment;
return stream_data;
} }
void MediaHandlerTestBase::SetUpGraph(size_t num_inputs, void MediaHandlerTestBase::SetUpGraph(size_t num_inputs,
@ -143,24 +184,5 @@ void MediaHandlerTestBase::ClearOutputStreamDataVector() {
next_handler_->clear_stream_data_vector(); next_handler_->clear_stream_data_vector();
} }
std::shared_ptr<StreamInfo> MediaHandlerTestBase::GetMockStreamInfo(
Codec codec,
uint32_t time_scale) {
if (codec >= kCodecAudio && codec < kCodecAudioMaxPlusOne) {
return std::shared_ptr<StreamInfo>(new AudioStreamInfo(
kTrackId, time_scale, kDuration, codec, kCodecString, kCodecConfig,
sizeof(kCodecConfig), kSampleBits, kNumChannels, kSamplingFrequency,
kSeekPrerollNs, kCodecDelayNs, kMaxBitrate, kAvgBitrate, kLanguage,
!kEncrypted));
} else if (codec >= kCodecVideo && codec < kCodecVideoMaxPlusOne) {
return std::shared_ptr<StreamInfo>(new VideoStreamInfo(
kTrackId, time_scale, kDuration, codec, H26xStreamFormat::kUnSpecified,
kCodecString, kCodecConfig, sizeof(kCodecConfig), kWidth, kHeight,
kPixelWidth, kPixelHeight, kTrickPlayFactor, kNaluLengthSize, kLanguage,
!kEncrypted));
}
return nullptr;
}
} // namespace media } // namespace media
} // namespace shaka } // namespace shaka

View File

@ -95,36 +95,46 @@ class MediaHandlerTestBase : public ::testing::Test {
public: public:
MediaHandlerTestBase(); MediaHandlerTestBase();
/// @return a stream data with mock stream info. bool IsVideoCodec(Codec codec) const;
std::unique_ptr<StreamData> GetStreamInfoStreamData(int stream_index,
std::unique_ptr<StreamInfo> GetVideoStreamInfo(
uint32_t time_scale) const;
std::unique_ptr<StreamInfo> GetVideoStreamInfo(
uint32_t time_scale, uint32_t width, uint64_t height) const;
std::unique_ptr<StreamInfo> GetVideoStreamInfo(
uint32_t time_scale, Codec codec) const;
std::unique_ptr<StreamInfo> GetVideoStreamInfo(
uint32_t time_scale,
Codec codec, Codec codec,
uint32_t time_scale); uint32_t width,
uint64_t height) const;
/// @return a stream data with mock video stream info. std::unique_ptr<StreamInfo> GetAudioStreamInfo(
std::unique_ptr<StreamData> GetVideoStreamInfoStreamData( uint32_t time_scale) const;
int stream_index,
uint32_t time_scale) {
return GetStreamInfoStreamData(stream_index, kCodecVP9, time_scale);
}
/// @return a stream data with mock audio stream info. std::unique_ptr<StreamInfo> GetAudioStreamInfo(
std::unique_ptr<StreamData> GetAudioStreamInfoStreamData( uint32_t time_scale,
int stream_index, Codec codec) const;
uint32_t time_scale) {
return GetStreamInfoStreamData(stream_index, kCodecAAC, time_scale);
}
/// @return a stream data with mock media sample. std::unique_ptr<MediaSample> GetMediaSample(
std::unique_ptr<StreamData> GetMediaSampleStreamData(int stream_index,
int64_t timestamp, int64_t timestamp,
int64_t duration, int64_t duration,
bool is_keyframe); bool is_keyframe) const;
/// @return a stream data with mock segment info. std::unique_ptr<MediaSample> GetMediaSample(
std::unique_ptr<StreamData> GetSegmentInfoStreamData(int stream_index, int64_t timestamp,
int64_t duration,
bool is_keyframe,
const uint8_t* data,
size_t data_length) const;
std::unique_ptr<SegmentInfo> GetSegmentInfo(
int64_t start_timestamp, int64_t start_timestamp,
int64_t duration, int64_t duration,
bool is_subsegment); bool is_subsegment) const;
/// Setup a graph using |handler| with |num_inputs| and |num_outputs|. /// Setup a graph using |handler| with |num_inputs| and |num_outputs|.
void SetUpGraph(size_t num_inputs, void SetUpGraph(size_t num_inputs,
@ -148,10 +158,6 @@ class MediaHandlerTestBase : public ::testing::Test {
MediaHandlerTestBase(const MediaHandlerTestBase&) = delete; MediaHandlerTestBase(const MediaHandlerTestBase&) = delete;
MediaHandlerTestBase& operator=(const MediaHandlerTestBase&) = delete; MediaHandlerTestBase& operator=(const MediaHandlerTestBase&) = delete;
// Get a mock stream info for testing.
std::shared_ptr<StreamInfo> GetMockStreamInfo(Codec codec,
uint32_t time_scale);
// Downstream handler used in testing graph. // Downstream handler used in testing graph.
std::shared_ptr<FakeMediaHandler> next_handler_; std::shared_ptr<FakeMediaHandler> next_handler_;
// Some random handler which can be used for testing. // Some random handler which can be used for testing.

View File

@@ -50,10 +50,10 @@ Status Muxer::Process(std::unique_ptr<StreamData> stream_data) {
       }
       return InitializeMuxer();
     case StreamDataType::kSegmentInfo: {
-      auto& segment_info = stream_data->segment_info;
-      if (muxer_listener_ && segment_info->is_encrypted) {
+      const auto& segment_info = *stream_data->segment_info;
+      if (muxer_listener_ && segment_info.is_encrypted) {
         const EncryptionConfig* encryption_config =
-            segment_info->key_rotation_encryption_config.get();
+            segment_info.key_rotation_encryption_config.get();
         // Only call OnEncryptionInfoReady again when key updates.
         if (encryption_config && encryption_config->key_id != current_key_id_) {
           muxer_listener_->OnEncryptionInfoReady(
@@ -67,12 +67,11 @@ Status Muxer::Process(std::unique_ptr<StreamData> stream_data) {
           muxer_listener_->OnEncryptionStart();
         }
       }
-      return FinalizeSegment(stream_data->stream_index,
-                             std::move(segment_info));
+      return FinalizeSegment(stream_data->stream_index, segment_info);
     }
     case StreamDataType::kMediaSample:
       return AddSample(stream_data->stream_index,
-                       std::move(stream_data->media_sample));
+                       *stream_data->media_sample);
    default:
      VLOG(3) << "Stream data type "
              << static_cast<int>(stream_data->stream_data_type) << " ignored.";

View File

@@ -44,7 +44,7 @@ class Muxer : public MediaHandler {
   /// @param progress_listener should not be NULL.
   void SetProgressListener(std::unique_ptr<ProgressListener> progress_listener);
 
-  const std::vector<std::shared_ptr<StreamInfo>>& streams() const {
+  const std::vector<std::shared_ptr<const StreamInfo>>& streams() const {
     return streams_;
   }
 
@@ -79,15 +79,17 @@ class Muxer : public MediaHandler {
   virtual Status Finalize() = 0;
 
   // Add a new sample.
-  virtual Status AddSample(size_t stream_id,
-                           std::shared_ptr<MediaSample> sample) = 0;
+  virtual Status AddSample(
+      size_t stream_id,
+      const MediaSample& sample) = 0;
 
   // Finalize the segment or subsegment.
-  virtual Status FinalizeSegment(size_t stream_id,
-                                 std::shared_ptr<SegmentInfo> segment_info) = 0;
+  virtual Status FinalizeSegment(
+      size_t stream_id,
+      const SegmentInfo& segment_info) = 0;
 
   MuxerOptions options_;
-  std::vector<std::shared_ptr<StreamInfo>> streams_;
+  std::vector<std::shared_ptr<const StreamInfo>> streams_;
   std::vector<uint8_t> current_key_id_;
   bool encryption_started_ = false;
   bool cancelled_;

View File

@@ -7,6 +7,7 @@
 #ifndef MEDIA_BASE_STREAM_INFO_H_
 #define MEDIA_BASE_STREAM_INFO_H_
 
+#include <memory>
 #include <string>
 #include <vector>
 
@@ -72,6 +73,11 @@ class StreamInfo {
   /// @return A human-readable string describing the stream info.
   virtual std::string ToString() const;
 
+  /// @return A new copy of this stream info. The copy will be of the same
+  ///         type as the original. This should be used when a copy is needed
+  ///         without explicitly knowing the stream info type.
+  virtual std::unique_ptr<StreamInfo> Clone() const = 0;
+
   StreamType stream_type() const { return stream_type_; }
   uint32_t track_id() const { return track_id_; }
   uint32_t time_scale() const { return time_scale_; }
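
A short illustration (not part of the commit) of the Clone() contract documented
above; GetVideoStreamInfo() is the test helper added later in this diff, and the
check is only meant to show that the clone keeps its concrete type:

    std::unique_ptr<StreamInfo> original = GetVideoStreamInfo(kTimeScale);
    std::unique_ptr<StreamInfo> copy = original->Clone();
    // Both are held through the StreamInfo base, but the copy is still a
    // VideoStreamInfo underneath.
    DCHECK_EQ(original->stream_type(), copy->stream_type());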

View File

@@ -28,5 +28,9 @@ bool TextStreamInfo::IsValidConfig() const {
   return true;
 }
 
+std::unique_ptr<StreamInfo> TextStreamInfo::Clone() const {
+  return std::unique_ptr<StreamInfo>(new TextStreamInfo(*this));
+}
+
 }  // namespace media
 }  // namespace shaka

View File

@@ -38,6 +38,8 @@ class TextStreamInfo : public StreamInfo {
   bool IsValidConfig() const override;
 
+  std::unique_ptr<StreamInfo> Clone() const override;
+
   uint16_t width() const { return width_; }
   uint16_t height() const { return height_; }

View File

@@ -88,5 +88,9 @@ std::string VideoStreamInfo::ToString() const {
                       nalu_length_size_);
 }
 
+std::unique_ptr<StreamInfo> VideoStreamInfo::Clone() const {
+  return std::unique_ptr<StreamInfo>(new VideoStreamInfo(*this));
+}
+
 }  // namespace media
 }  // namespace shaka

View File

@@ -50,6 +50,7 @@ class VideoStreamInfo : public StreamInfo {
   /// @{
   bool IsValidConfig() const override;
   std::string ToString() const override;
+  std::unique_ptr<StreamInfo> Clone() const override;
   /// @}
 
   H26xStreamFormat h26x_stream_format() const { return h26x_stream_format_; }

View File

@ -56,15 +56,17 @@ TEST_F(ChunkingHandlerTest, AudioNoSubsegmentsThenFlush) {
chunking_params.segment_duration_in_seconds = 1; chunking_params.segment_duration_in_seconds = 1;
SetUpChunkingHandler(1, chunking_params); SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(GetAudioStreamInfoStreamData(kStreamIndex0, kTimeScale0))); ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
EXPECT_THAT( EXPECT_THAT(
GetOutputStreamDataVector(), GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted))); ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted)));
for (int i = 0; i < 5; ++i) { for (int i = 0; i < 5; ++i) {
ClearOutputStreamDataVector(); ClearOutputStreamDataVector();
ASSERT_OK(Process(GetMediaSampleStreamData(kStreamIndex0, i * kDuration1, ASSERT_OK(Process(StreamData::FromMediaSample(
kDuration1, kKeyFrame))); kStreamIndex0,
GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
// One output stream_data except when i == 3, which also has SegmentInfo. // One output stream_data except when i == 3, which also has SegmentInfo.
if (i == 3) { if (i == 3) {
EXPECT_THAT(GetOutputStreamDataVector(), EXPECT_THAT(GetOutputStreamDataVector(),
@ -93,10 +95,12 @@ TEST_F(ChunkingHandlerTest, AudioWithSubsegments) {
chunking_params.subsegment_duration_in_seconds = 0.5; chunking_params.subsegment_duration_in_seconds = 0.5;
SetUpChunkingHandler(1, chunking_params); SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(GetAudioStreamInfoStreamData(kStreamIndex0, kTimeScale0))); ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
for (int i = 0; i < 5; ++i) { for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(GetMediaSampleStreamData(kStreamIndex0, i * kDuration1, ASSERT_OK(Process(StreamData::FromMediaSample(
kDuration1, kKeyFrame))); kStreamIndex0,
GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
} }
EXPECT_THAT( EXPECT_THAT(
GetOutputStreamDataVector(), GetOutputStreamDataVector(),
@ -120,14 +124,18 @@ TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
chunking_params.subsegment_duration_in_seconds = 0.3; chunking_params.subsegment_duration_in_seconds = 0.3;
SetUpChunkingHandler(1, chunking_params); SetUpChunkingHandler(1, chunking_params);
ASSERT_OK(Process(GetVideoStreamInfoStreamData(kStreamIndex0, kTimeScale1))); ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale1))));
const int64_t kVideoStartTimestamp = 12345; const int64_t kVideoStartTimestamp = 12345;
for (int i = 0; i < 6; ++i) { for (int i = 0; i < 6; ++i) {
// Alternate key frame. // Alternate key frame.
const bool is_key_frame = (i % 2) == 1; const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + i * kDuration1, kDuration1, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + i * kDuration1,
kDuration1,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
GetOutputStreamDataVector(), GetOutputStreamDataVector(),
@ -159,8 +167,10 @@ TEST_F(ChunkingHandlerTest, AudioAndVideo) {
chunking_params.subsegment_duration_in_seconds = 0.3; chunking_params.subsegment_duration_in_seconds = 0.3;
SetUpChunkingHandler(2, chunking_params); SetUpChunkingHandler(2, chunking_params);
ASSERT_OK(Process(GetAudioStreamInfoStreamData(kStreamIndex0, kTimeScale0))); ASSERT_OK(Process(StreamData::FromStreamInfo(
ASSERT_OK(Process(GetVideoStreamInfoStreamData(kStreamIndex1, kTimeScale1))); kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex1, GetVideoStreamInfo(kTimeScale1))));
EXPECT_THAT( EXPECT_THAT(
GetOutputStreamDataVector(), GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted), ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted),
@ -171,14 +181,20 @@ TEST_F(ChunkingHandlerTest, AudioAndVideo) {
const int64_t kAudioStartTimestamp = 9876; const int64_t kAudioStartTimestamp = 9876;
const int64_t kVideoStartTimestamp = 12345; const int64_t kVideoStartTimestamp = 12345;
for (int i = 0; i < 5; ++i) { for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kAudioStartTimestamp + kDuration0 * i, kDuration0, kStreamIndex0,
true))); GetMediaSample(
kAudioStartTimestamp + kDuration0 * i,
kDuration0,
true))));
// Alternate key frame. // Alternate key frame.
const bool is_key_frame = (i % 2) == 1; const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex1, kVideoStartTimestamp + kDuration1 * i, kDuration1, kStreamIndex1,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration1 * i,
kDuration1,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
@ -211,15 +227,24 @@ TEST_F(ChunkingHandlerTest, AudioAndVideo) {
// The side comments below show the equivalent timestamp in video timescale. // The side comments below show the equivalent timestamp in video timescale.
// The audio and video are made ~aligned. // The audio and video are made ~aligned.
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kAudioStartTimestamp + kDuration0 * 5, kDuration0, kStreamIndex0,
true))); // 13595 GetMediaSample(
ASSERT_OK(Process(GetMediaSampleStreamData( kAudioStartTimestamp + kDuration0 * 5,
kStreamIndex1, kVideoStartTimestamp + kDuration1 * 5, kDuration1, kDuration0,
true))); // 13845 true)))); // 13595
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kAudioStartTimestamp + kDuration0 * 6, kDuration0, kStreamIndex1,
true))); // 13845 GetMediaSample(
kVideoStartTimestamp + kDuration1 * 5,
kDuration1,
true)))); // 13845
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(
kAudioStartTimestamp + kDuration0 * 6,
kDuration0,
true)))); // 13845
// This expectation is separated from the expectation above because
// ElementsAre supports at most 10 elements.
EXPECT_THAT( EXPECT_THAT(

View File

@ -15,6 +15,7 @@
#include "packager/media/base/aes_pattern_cryptor.h" #include "packager/media/base/aes_pattern_cryptor.h"
#include "packager/media/base/key_source.h" #include "packager/media/base/key_source.h"
#include "packager/media/base/media_sample.h" #include "packager/media/base/media_sample.h"
#include "packager/media/base/audio_stream_info.h"
#include "packager/media/base/video_stream_info.h" #include "packager/media/base/video_stream_info.h"
#include "packager/media/codecs/video_slice_header_parser.h" #include "packager/media/codecs/video_slice_header_parser.h"
#include "packager/media/codecs/vp8_parser.h" #include "packager/media/codecs/vp8_parser.h"
@ -26,6 +27,9 @@ namespace media {
namespace { namespace {
const size_t kCencBlockSize = 16u; const size_t kCencBlockSize = 16u;
// The encryption handler only supports a single output.
const size_t kStreamIndex = 0;
// The default KID for key rotation is all 0s. // The default KID for key rotation is all 0s.
const uint8_t kKeyRotationDefaultKeyId[] = { const uint8_t kKeyRotationDefaultKeyId[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@ -98,13 +102,13 @@ Status EncryptionHandler::InitializeInternal() {
} }
Status EncryptionHandler::Process(std::unique_ptr<StreamData> stream_data) { Status EncryptionHandler::Process(std::unique_ptr<StreamData> stream_data) {
Status status;
switch (stream_data->stream_data_type) { switch (stream_data->stream_data_type) {
case StreamDataType::kStreamInfo: case StreamDataType::kStreamInfo:
status = ProcessStreamInfo(stream_data->stream_info.get()); return ProcessStreamInfo(*stream_data->stream_info);
break;
case StreamDataType::kSegmentInfo: { case StreamDataType::kSegmentInfo: {
SegmentInfo* segment_info = stream_data->segment_info.get(); std::shared_ptr<SegmentInfo> segment_info(new SegmentInfo(
*stream_data->segment_info));
segment_info->is_encrypted = remaining_clear_lead_ <= 0; segment_info->is_encrypted = remaining_clear_lead_ <= 0;
const bool key_rotation_enabled = crypto_period_duration_ != 0; const bool key_rotation_enabled = crypto_period_duration_ != 0;
@ -116,25 +120,28 @@ Status EncryptionHandler::Process(std::unique_ptr<StreamData> stream_data) {
if (remaining_clear_lead_ > 0) if (remaining_clear_lead_ > 0)
remaining_clear_lead_ -= segment_info->duration; remaining_clear_lead_ -= segment_info->duration;
} }
break;
return DispatchSegmentInfo(kStreamIndex, segment_info);
} }
case StreamDataType::kMediaSample: case StreamDataType::kMediaSample:
status = ProcessMediaSample(stream_data->media_sample.get()); return ProcessMediaSample(std::move(stream_data->media_sample));
break;
default: default:
VLOG(3) << "Stream data type " VLOG(3) << "Stream data type "
<< static_cast<int>(stream_data->stream_data_type) << " ignored."; << static_cast<int>(stream_data->stream_data_type) << " ignored.";
break; return Dispatch(std::move(stream_data));
} }
return status.ok() ? Dispatch(std::move(stream_data)) : status;
} }
Status EncryptionHandler::ProcessStreamInfo(StreamInfo* stream_info) { Status EncryptionHandler::ProcessStreamInfo(const StreamInfo& clear_info) {
if (stream_info->is_encrypted()) { if (clear_info.is_encrypted()) {
return Status(error::INVALID_ARGUMENT, return Status(error::INVALID_ARGUMENT,
"Input stream is already encrypted."); "Input stream is already encrypted.");
} }
DCHECK_NE(kStreamUnknown, clear_info.stream_type());
DCHECK_NE(kStreamText, clear_info.stream_type());
std::shared_ptr<StreamInfo> stream_info = clear_info.Clone();
remaining_clear_lead_ = remaining_clear_lead_ =
encryption_params_.clear_lead_in_seconds * stream_info->time_scale(); encryption_params_.clear_lead_in_seconds * stream_info->time_scale();
crypto_period_duration_ = crypto_period_duration_ =
@ -196,16 +203,21 @@ Status EncryptionHandler::ProcessStreamInfo(StreamInfo* stream_info) {
stream_info->set_is_encrypted(true); stream_info->set_is_encrypted(true);
stream_info->set_has_clear_lead(encryption_params_.clear_lead_in_seconds > 0); stream_info->set_has_clear_lead(encryption_params_.clear_lead_in_seconds > 0);
stream_info->set_encryption_config(*encryption_config_); stream_info->set_encryption_config(*encryption_config_);
return Status::OK;
return DispatchStreamInfo(kStreamIndex, stream_info);
} }
Status EncryptionHandler::ProcessMediaSample(MediaSample* sample) { Status EncryptionHandler::ProcessMediaSample(
std::shared_ptr<const MediaSample> clear_sample) {
DCHECK(clear_sample);
// We need to parse the frame (which also updates the vpx parser) even if the // We need to parse the frame (which also updates the vpx parser) even if the
// frame is not encrypted as the next (encrypted) frame may be dependent on // frame is not encrypted as the next (encrypted) frame may be dependent on
// this clear frame. // this clear frame.
std::vector<VPxFrameInfo> vpx_frames; std::vector<VPxFrameInfo> vpx_frames;
if (vpx_parser_ && if (vpx_parser_ && !vpx_parser_->Parse(clear_sample->data(),
!vpx_parser_->Parse(sample->data(), sample->data_size(), &vpx_frames)) { clear_sample->data_size(),
&vpx_frames)) {
return Status(error::ENCRYPTION_FAILURE, "Failed to parse vpx frame."); return Status(error::ENCRYPTION_FAILURE, "Failed to parse vpx frame.");
} }
@ -214,7 +226,7 @@ Status EncryptionHandler::ProcessMediaSample(MediaSample* sample) {
// allows clients to prefetch the keys. // allows clients to prefetch the keys.
if (check_new_crypto_period_) { if (check_new_crypto_period_) {
const int64_t current_crypto_period_index = const int64_t current_crypto_period_index =
sample->dts() / crypto_period_duration_; clear_sample->dts() / crypto_period_duration_;
if (current_crypto_period_index != prev_crypto_period_index_) { if (current_crypto_period_index != prev_crypto_period_index_) {
EncryptionKey encryption_key; EncryptionKey encryption_key;
Status status = key_source_->GetCryptoPeriodKey( Status status = key_source_->GetCryptoPeriodKey(
@ -228,38 +240,69 @@ Status EncryptionHandler::ProcessMediaSample(MediaSample* sample) {
check_new_crypto_period_ = false; check_new_crypto_period_ = false;
} }
if (remaining_clear_lead_ > 0) // Since there is no encryption needed right now, send the clear copy
return Status::OK; // downstream so we can save the costs of copying it.
if (remaining_clear_lead_ > 0) {
return DispatchMediaSample(kStreamIndex, std::move(clear_sample));
}
std::unique_ptr<DecryptConfig> decrypt_config( std::unique_ptr<DecryptConfig> decrypt_config(new DecryptConfig(
new DecryptConfig(encryption_config_->key_id, encryptor_->iv(), encryption_config_->key_id,
std::vector<SubsampleEntry>(), protection_scheme_, encryptor_->iv(),
crypt_byte_block_, skip_byte_block_)); std::vector<SubsampleEntry>(),
bool result = true; protection_scheme_,
crypt_byte_block_,
skip_byte_block_));
// Now that we know that this sample must be encrypted, make a copy of
// the sample first so that all the encryption operations can be done
// in-place.
std::shared_ptr<MediaSample> cipher_sample =
MediaSample::CopyFrom(*clear_sample);
Status result;
if (vpx_parser_) { if (vpx_parser_) {
result = EncryptVpxFrame(vpx_frames, sample, decrypt_config.get()); if (EncryptVpxFrame(vpx_frames,
if (result) { cipher_sample->writable_data(),
cipher_sample->data_size(),
decrypt_config.get())) {
DCHECK_EQ(decrypt_config->GetTotalSizeOfSubsamples(), DCHECK_EQ(decrypt_config->GetTotalSizeOfSubsamples(),
sample->data_size()); cipher_sample->data_size());
} else {
result = Status(
error::ENCRYPTION_FAILURE,
"Failed to encrypt VPX frame.");
} }
} else if (header_parser_) { } else if (header_parser_) {
result = EncryptNalFrame(sample, decrypt_config.get()); if (EncryptNalFrame(cipher_sample->writable_data(),
if (result) { cipher_sample->data_size(),
decrypt_config.get())) {
DCHECK_EQ(decrypt_config->GetTotalSizeOfSubsamples(), DCHECK_EQ(decrypt_config->GetTotalSizeOfSubsamples(),
sample->data_size()); cipher_sample->data_size());
}
} else { } else {
if (sample->data_size() > leading_clear_bytes_size_) { result = Status(
EncryptBytes(sample->writable_data() + leading_clear_bytes_size_, error::ENCRYPTION_FAILURE,
sample->data_size() - leading_clear_bytes_size_); "Failed to encrypt NAL frame.");
} }
} else if (cipher_sample->data_size() > leading_clear_bytes_size_) {
EncryptBytes(
cipher_sample->writable_data() + leading_clear_bytes_size_,
cipher_sample->data_size() - leading_clear_bytes_size_);
} }
if (!result)
return Status(error::ENCRYPTION_FAILURE, "Failed to encrypt samples."); if (!result.ok()) {
sample->set_is_encrypted(true); return result;
sample->set_decrypt_config(std::move(decrypt_config)); }
encryptor_->UpdateIv(); encryptor_->UpdateIv();
return Status::OK;
// Finish initializing the sample before sending it downstream. We must
// wait until now to finish the initialization as we will lose access to
// |decrypt_config| once we set it.
cipher_sample->set_is_encrypted(true);
cipher_sample->set_decrypt_config(std::move(decrypt_config));
return DispatchMediaSample(kStreamIndex, std::move(cipher_sample));
} }
Status EncryptionHandler::SetupProtectionPattern(StreamType stream_type) { Status EncryptionHandler::SetupProtectionPattern(StreamType stream_type) {
@ -390,9 +433,10 @@ bool EncryptionHandler::CreateEncryptor(const EncryptionKey& encryption_key) {
bool EncryptionHandler::EncryptVpxFrame( bool EncryptionHandler::EncryptVpxFrame(
const std::vector<VPxFrameInfo>& vpx_frames, const std::vector<VPxFrameInfo>& vpx_frames,
MediaSample* sample, uint8_t* source,
size_t source_size,
DecryptConfig* decrypt_config) { DecryptConfig* decrypt_config) {
uint8_t* data = sample->writable_data(); uint8_t* data = source;
for (const VPxFrameInfo& frame : vpx_frames) { for (const VPxFrameInfo& frame : vpx_frames) {
uint16_t clear_bytes = uint16_t clear_bytes =
static_cast<uint16_t>(frame.uncompressed_header_size); static_cast<uint16_t>(frame.uncompressed_header_size);
@ -419,7 +463,7 @@ bool EncryptionHandler::EncryptVpxFrame(
// Add subsample for the superframe index if exists. // Add subsample for the superframe index if exists.
const bool is_superframe = vpx_frames.size() > 1; const bool is_superframe = vpx_frames.size() > 1;
if (is_superframe) { if (is_superframe) {
size_t index_size = sample->data() + sample->data_size() - data; size_t index_size = source + source_size - data;
DCHECK_LE(index_size, 2 + vpx_frames.size() * 4); DCHECK_LE(index_size, 2 + vpx_frames.size() * 4);
DCHECK_GE(index_size, 2 + vpx_frames.size() * 1); DCHECK_GE(index_size, 2 + vpx_frames.size() * 1);
uint16_t clear_bytes = static_cast<uint16_t>(index_size); uint16_t clear_bytes = static_cast<uint16_t>(index_size);
@ -429,14 +473,14 @@ bool EncryptionHandler::EncryptVpxFrame(
return true; return true;
} }
bool EncryptionHandler::EncryptNalFrame(MediaSample* sample, bool EncryptionHandler::EncryptNalFrame(uint8_t* data,
size_t data_length,
DecryptConfig* decrypt_config) { DecryptConfig* decrypt_config) {
DCHECK_NE(nalu_length_size_, 0u); DCHECK_NE(nalu_length_size_, 0u);
DCHECK(header_parser_); DCHECK(header_parser_);
const Nalu::CodecType nalu_type = const Nalu::CodecType nalu_type =
(codec_ == kCodecH265) ? Nalu::kH265 : Nalu::kH264; (codec_ == kCodecH265) ? Nalu::kH265 : Nalu::kH264;
NaluReader reader(nalu_type, nalu_length_size_, sample->writable_data(), NaluReader reader(nalu_type, nalu_length_size_, data, data_length);
sample->data_size());
// Store the current length of clear data. This is used to squash // Store the current length of clear data. This is used to squash
// multiple unencrypted NAL units into fewer subsample entries. // multiple unencrypted NAL units into fewer subsample entries.
@ -496,6 +540,7 @@ bool EncryptionHandler::EncryptNalFrame(MediaSample* sample,
} }
void EncryptionHandler::EncryptBytes(uint8_t* data, size_t size) { void EncryptionHandler::EncryptBytes(uint8_t* data, size_t size) {
DCHECK(data);
DCHECK(encryptor_); DCHECK(encryptor_);
CHECK(encryptor_->Crypt(data, size, data)); CHECK(encryptor_->Crypt(data, size, data));
} }
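
Because the hunk above is dense, here is a condensed, hypothetical sketch of the
copy-on-write decision ProcessMediaSample() now makes. EncryptInPlace() is an
illustrative stand-in for the VPx/NAL/plain-sample encryption paths;
MediaSample::CopyFrom(), writable_data(), data_size() and set_is_encrypted()
come from the diff:

    std::shared_ptr<const MediaSample> MaybeEncrypt(
        std::shared_ptr<const MediaSample> clear_sample,
        bool still_in_clear_lead) {
      if (still_in_clear_lead) {
        // No mutation needed: forward the shared, immutable sample and skip
        // the copy entirely.
        return clear_sample;
      }
      // The payload must change, so encrypt a private copy; every handler that
      // received |clear_sample| keeps seeing the untouched clear bytes.
      std::shared_ptr<MediaSample> cipher_sample =
          MediaSample::CopyFrom(*clear_sample);
      EncryptInPlace(cipher_sample->writable_data(), cipher_sample->data_size());
      cipher_sample->set_is_encrypted(true);
      return cipher_sample;
    }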

View File

@@ -41,17 +41,21 @@ class EncryptionHandler : public MediaHandler {
   EncryptionHandler& operator=(const EncryptionHandler&) = delete;
 
   // Processes |stream_info| and sets up stream specific variables.
-  Status ProcessStreamInfo(StreamInfo* stream_info);
+  Status ProcessStreamInfo(const StreamInfo& stream_info);
   // Processes media sample and encrypts it if needed.
-  Status ProcessMediaSample(MediaSample* sample);
+  Status ProcessMediaSample(std::shared_ptr<const MediaSample> clear_sample);
 
   Status SetupProtectionPattern(StreamType stream_type);
   bool CreateEncryptor(const EncryptionKey& encryption_key);
   bool EncryptVpxFrame(const std::vector<VPxFrameInfo>& vpx_frames,
-                       MediaSample* sample,
+                       uint8_t* source,
+                       size_t source_size,
                        DecryptConfig* decrypt_config);
-  bool EncryptNalFrame(MediaSample* sample, DecryptConfig* decrypt_config);
-  void EncryptBytes(uint8_t* data, size_t size);
+  bool EncryptNalFrame(uint8_t* data,
+                       size_t data_length,
+                       DecryptConfig* decrypt_config);
+  void EncryptBytes(uint8_t* data,
+                    size_t size);
 
   // Testing injections.
   void InjectVpxParserForTesting(std::unique_ptr<VPxParser> vpx_parser);

View File

@ -271,19 +271,6 @@ class EncryptionHandlerEncryptionTest
return subsamples; return subsamples;
} }
std::unique_ptr<StreamData> GetMediaSampleStreamData(int stream_index,
int64_t timestamp,
int64_t duration) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kMediaSample;
stream_data->media_sample.reset(
new MediaSample(kData, sizeof(kData), nullptr, 0, kIsKeyFrame));
stream_data->media_sample->set_dts(timestamp);
stream_data->media_sample->set_duration(duration);
return stream_data;
}
// Inject vpx parser / video slice header parser if needed. // Inject vpx parser / video slice header parser if needed.
void InjectCodecParser() { void InjectCodecParser() {
switch (codec_) { switch (codec_) {
@ -480,7 +467,15 @@ TEST_P(EncryptionHandlerEncryptionTest, ClearLeadWithNoKeyRotation) {
EXPECT_CALL(mock_key_source_, GetKey(_, _)) EXPECT_CALL(mock_key_source_, GetKey(_, _))
.WillOnce( .WillOnce(
DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK))); DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK)));
ASSERT_OK(Process(GetStreamInfoStreamData(kStreamIndex, codec_, kTimeScale)));
if (IsVideoCodec(codec_)) {
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex, GetVideoStreamInfo(kTimeScale, codec_))));
} else {
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));
}
EXPECT_THAT(GetOutputStreamDataVector(), EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted))); ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted)));
const StreamInfo* stream_info = const StreamInfo* stream_info =
@ -500,10 +495,17 @@ TEST_P(EncryptionHandlerEncryptionTest, ClearLeadWithNoKeyRotation) {
// There are three segments. Only the third segment is encrypted. // There are three segments. Only the third segment is encrypted.
for (int i = 0; i < 3; ++i) { for (int i = 0; i < 3; ++i) {
// Use single-frame segment for testing. // Use single-frame segment for testing.
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex, i * kSegmentDuration, kSegmentDuration))); kStreamIndex,
ASSERT_OK(Process(GetSegmentInfoStreamData( GetMediaSample(
kStreamIndex, i * kSegmentDuration, kSegmentDuration, !kIsSubsegment))); i * kSegmentDuration,
kSegmentDuration,
kIsKeyFrame,
kData,
sizeof(kData)))));
ASSERT_OK(Process(StreamData::FromSegmentInfo(
kStreamIndex,
GetSegmentInfo(i * kSegmentDuration, kSegmentDuration, !kIsSubsegment))));
const bool is_encrypted = i == 2; const bool is_encrypted = i == 2;
const auto& output_stream_data = GetOutputStreamDataVector(); const auto& output_stream_data = GetOutputStreamDataVector();
EXPECT_THAT(output_stream_data, EXPECT_THAT(output_stream_data,
@ -531,7 +533,14 @@ TEST_P(EncryptionHandlerEncryptionTest, ClearLeadWithKeyRotation) {
encryption_params.vp9_subsample_encryption = vp9_subsample_encryption_; encryption_params.vp9_subsample_encryption = vp9_subsample_encryption_;
SetUpEncryptionHandler(encryption_params); SetUpEncryptionHandler(encryption_params);
ASSERT_OK(Process(GetStreamInfoStreamData(kStreamIndex, codec_, kTimeScale))); if (IsVideoCodec(codec_)) {
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex, GetVideoStreamInfo(kTimeScale, codec_))));
} else {
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));
}
EXPECT_THAT(GetOutputStreamDataVector(), EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted))); ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted)));
const StreamInfo* stream_info = const StreamInfo* stream_info =
@ -558,10 +567,17 @@ TEST_P(EncryptionHandlerEncryptionTest, ClearLeadWithKeyRotation) {
Return(Status::OK))); Return(Status::OK)));
} }
// Use single-frame segment for testing. // Use single-frame segment for testing.
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex, i * kSegmentDuration, kSegmentDuration))); kStreamIndex,
ASSERT_OK(Process(GetSegmentInfoStreamData( GetMediaSample(
kStreamIndex, i * kSegmentDuration, kSegmentDuration, !kIsSubsegment))); i * kSegmentDuration,
kSegmentDuration,
kIsKeyFrame,
kData,
sizeof(kData)))));
ASSERT_OK(Process(StreamData::FromSegmentInfo(
kStreamIndex,
GetSegmentInfo(i * kSegmentDuration, kSegmentDuration, !kIsSubsegment))));
const bool is_encrypted = i >= 2; const bool is_encrypted = i >= 2;
const auto& output_stream_data = GetOutputStreamDataVector(); const auto& output_stream_data = GetOutputStreamDataVector();
EXPECT_THAT(output_stream_data, EXPECT_THAT(output_stream_data,
@ -592,7 +608,14 @@ TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
.WillOnce( .WillOnce(
DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK))); DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK)));
ASSERT_OK(Process(GetStreamInfoStreamData(kStreamIndex, codec_, kTimeScale))); if (IsVideoCodec(codec_)) {
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex, GetVideoStreamInfo(kTimeScale, codec_))));
} else {
ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));
}
EXPECT_THAT(GetOutputStreamDataVector(), EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted))); ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted)));
const StreamInfo* stream_info = const StreamInfo* stream_info =
@ -608,8 +631,14 @@ TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
stream_data->media_sample.reset( stream_data->media_sample.reset(
new MediaSample(kData, sizeof(kData), nullptr, 0, kIsKeyFrame)); new MediaSample(kData, sizeof(kData), nullptr, 0, kIsKeyFrame));
ASSERT_OK( ASSERT_OK(Process(StreamData::FromMediaSample(
Process(GetMediaSampleStreamData(kStreamIndex, 0, kSampleDuration))); kStreamIndex,
GetMediaSample(
0,
kSampleDuration,
kIsKeyFrame,
kData,
sizeof(kData)))));
ASSERT_EQ(2u, GetOutputStreamDataVector().size()); ASSERT_EQ(2u, GetOutputStreamDataVector().size());
ASSERT_EQ(kStreamIndex, GetOutputStreamDataVector().back()->stream_index); ASSERT_EQ(kStreamIndex, GetOutputStreamDataVector().back()->stream_index);
ASSERT_EQ(StreamDataType::kMediaSample, ASSERT_EQ(StreamDataType::kMediaSample,
@ -625,12 +654,14 @@ TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
EXPECT_EQ(GetExpectedCryptByteBlock(), decrypt_config->crypt_byte_block()); EXPECT_EQ(GetExpectedCryptByteBlock(), decrypt_config->crypt_byte_block());
EXPECT_EQ(GetExpectedSkipByteBlock(), decrypt_config->skip_byte_block()); EXPECT_EQ(GetExpectedSkipByteBlock(), decrypt_config->skip_byte_block());
ASSERT_TRUE(Decrypt(*decrypt_config, media_sample->writable_data(), std::vector<uint8_t> expected(
media_sample->data_size())); kData,
EXPECT_EQ( kData + sizeof(kData));
std::vector<uint8_t>(kData, kData + sizeof(kData)), std::vector<uint8_t> actual(
std::vector<uint8_t>(media_sample->data(), media_sample->data(),
media_sample->data() + media_sample->data_size())); media_sample->data() + media_sample->data_size());
ASSERT_TRUE(Decrypt(*decrypt_config, actual.data(), actual.size()));
EXPECT_EQ(expected, actual);
} }
INSTANTIATE_TEST_CASE_P( INSTANTIATE_TEST_CASE_P(
@ -665,12 +696,16 @@ TEST_F(EncryptionHandlerTrackTypeTest, AudioTrackType) {
EXPECT_CALL(mock_key_source_, GetKey(kAudioStreamLabel, _)) EXPECT_CALL(mock_key_source_, GetKey(kAudioStreamLabel, _))
.WillOnce( .WillOnce(
DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK))); DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK)));
ASSERT_OK(Process(GetAudioStreamInfoStreamData(kStreamIndex, kTimeScale))); ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex,
GetAudioStreamInfo(kTimeScale))));
EXPECT_EQ(EncryptionParams::EncryptedStreamAttributes::kAudio, EXPECT_EQ(EncryptionParams::EncryptedStreamAttributes::kAudio,
captured_stream_attributes.stream_type); captured_stream_attributes.stream_type);
} }
TEST_F(EncryptionHandlerTrackTypeTest, VideoTrackType) { TEST_F(EncryptionHandlerTrackTypeTest, VideoTrackType) {
const int32_t kWidth = 12;
const int32_t kHeight = 34;
EncryptionParams::EncryptedStreamAttributes captured_stream_attributes; EncryptionParams::EncryptedStreamAttributes captured_stream_attributes;
EncryptionParams encryption_params; EncryptionParams encryption_params;
encryption_params.stream_label_func = encryption_params.stream_label_func =
@ -684,19 +719,13 @@ TEST_F(EncryptionHandlerTrackTypeTest, VideoTrackType) {
EXPECT_CALL(mock_key_source_, GetKey(kSdVideoStreamLabel, _)) EXPECT_CALL(mock_key_source_, GetKey(kSdVideoStreamLabel, _))
.WillOnce( .WillOnce(
DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK))); DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK)));
std::unique_ptr<StreamData> stream_data = ASSERT_OK(Process(StreamData::FromStreamInfo(
GetVideoStreamInfoStreamData(kStreamIndex, kTimeScale); kStreamIndex,
VideoStreamInfo* video_stream_info = GetVideoStreamInfo(kTimeScale, kWidth, kHeight))));
reinterpret_cast<VideoStreamInfo*>(stream_data->stream_info.get());
video_stream_info->set_width(12);
video_stream_info->set_height(34);
ASSERT_OK(Process(std::move(stream_data)));
EXPECT_EQ(EncryptionParams::EncryptedStreamAttributes::kVideo, EXPECT_EQ(EncryptionParams::EncryptedStreamAttributes::kVideo,
captured_stream_attributes.stream_type); captured_stream_attributes.stream_type);
EXPECT_EQ(video_stream_info->width(), EXPECT_EQ(captured_stream_attributes.oneof.video.width, kWidth);
captured_stream_attributes.oneof.video.width); EXPECT_EQ(captured_stream_attributes.oneof.video.height, kHeight);
EXPECT_EQ(video_stream_info->height(),
captured_stream_attributes.oneof.video.height);
} }
} // namespace media } // namespace media

View File

@ -65,21 +65,21 @@ bool PesPacketGenerator::Initialize(const StreamInfo& stream_info) {
return false; return false;
} }
bool PesPacketGenerator::PushSample(std::shared_ptr<MediaSample> sample) { bool PesPacketGenerator::PushSample(const MediaSample& sample) {
if (!current_processing_pes_) if (!current_processing_pes_)
current_processing_pes_.reset(new PesPacket()); current_processing_pes_.reset(new PesPacket());
current_processing_pes_->set_pts(timescale_scale_ * sample->pts()); current_processing_pes_->set_pts(timescale_scale_ * sample.pts());
current_processing_pes_->set_dts(timescale_scale_ * sample->dts()); current_processing_pes_->set_dts(timescale_scale_ * sample.dts());
if (stream_type_ == kStreamVideo) { if (stream_type_ == kStreamVideo) {
DCHECK(converter_); DCHECK(converter_);
std::vector<SubsampleEntry> subsamples; std::vector<SubsampleEntry> subsamples;
if (sample->decrypt_config()) if (sample.decrypt_config())
subsamples = sample->decrypt_config()->subsamples(); subsamples = sample.decrypt_config()->subsamples();
const bool kEscapeEncryptedNalu = true; const bool kEscapeEncryptedNalu = true;
std::vector<uint8_t> byte_stream; std::vector<uint8_t> byte_stream;
if (!converter_->ConvertUnitToByteStreamWithSubsamples( if (!converter_->ConvertUnitToByteStreamWithSubsamples(
sample->data(), sample->data_size(), sample->is_key_frame(), sample.data(), sample.data_size(), sample.is_key_frame(),
kEscapeEncryptedNalu, &byte_stream, &subsamples)) { kEscapeEncryptedNalu, &byte_stream, &subsamples)) {
LOG(ERROR) << "Failed to convert sample to byte stream."; LOG(ERROR) << "Failed to convert sample to byte stream.";
return false; return false;
@ -93,8 +93,8 @@ bool PesPacketGenerator::PushSample(std::shared_ptr<MediaSample> sample) {
DCHECK_EQ(stream_type_, kStreamAudio); DCHECK_EQ(stream_type_, kStreamAudio);
DCHECK(adts_converter_); DCHECK(adts_converter_);
std::vector<uint8_t> aac_frame(sample->data(), std::vector<uint8_t> aac_frame(sample.data(),
sample->data() + sample->data_size()); sample.data() + sample.data_size());
// TODO(rkuroiwa): ConvertToADTS() makes another copy of aac_frame internally. // TODO(rkuroiwa): ConvertToADTS() makes another copy of aac_frame internally.
// Optimize copying in this function, possibly by adding a method on // Optimize copying in this function, possibly by adding a method on

View File

@@ -42,7 +42,7 @@ class PesPacketGenerator {
   /// NumberOfReadyPesPackets().
   /// If this returns false, the object may end up in an undefined state.
   /// @return true on success, false otherwise.
-  virtual bool PushSample(std::shared_ptr<MediaSample> sample);
+  virtual bool PushSample(const MediaSample& sample);
 
   /// @return The number of PES packets that are ready to be consumed.
   virtual size_t NumberOfReadyPesPackets();

View File

@ -205,7 +205,7 @@ TEST_F(PesPacketGeneratorTest, AddVideoSample) {
UseMockNalUnitToByteStreamConverter(std::move(mock)); UseMockNalUnitToByteStreamConverter(std::move(mock));
EXPECT_TRUE(generator_.PushSample(sample)); EXPECT_TRUE(generator_.PushSample(*sample));
EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets()); EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets());
std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket(); std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket();
ASSERT_TRUE(pes_packet); ASSERT_TRUE(pes_packet);
@ -252,7 +252,7 @@ TEST_F(PesPacketGeneratorTest, AddEncryptedVideoSample) {
UseMockNalUnitToByteStreamConverter(std::move(mock)); UseMockNalUnitToByteStreamConverter(std::move(mock));
EXPECT_TRUE(generator_.PushSample(sample)); EXPECT_TRUE(generator_.PushSample(*sample));
EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets()); EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets());
std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket(); std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket();
ASSERT_TRUE(pes_packet); ASSERT_TRUE(pes_packet);
@ -285,7 +285,7 @@ TEST_F(PesPacketGeneratorTest, AddVideoSampleFailedToConvert) {
UseMockNalUnitToByteStreamConverter(std::move(mock)); UseMockNalUnitToByteStreamConverter(std::move(mock));
EXPECT_FALSE(generator_.PushSample(sample)); EXPECT_FALSE(generator_.PushSample(*sample));
EXPECT_EQ(0u, generator_.NumberOfReadyPesPackets()); EXPECT_EQ(0u, generator_.NumberOfReadyPesPackets());
EXPECT_TRUE(generator_.Flush()); EXPECT_TRUE(generator_.Flush());
} }
@ -308,7 +308,7 @@ TEST_F(PesPacketGeneratorTest, AddAudioSample) {
UseMockAACAudioSpecificConfig(std::move(mock)); UseMockAACAudioSpecificConfig(std::move(mock));
EXPECT_TRUE(generator_.PushSample(sample)); EXPECT_TRUE(generator_.PushSample(*sample));
EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets()); EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets());
std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket(); std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket();
ASSERT_TRUE(pes_packet); ASSERT_TRUE(pes_packet);
@ -335,7 +335,7 @@ TEST_F(PesPacketGeneratorTest, AddAudioSampleFailedToConvert) {
UseMockAACAudioSpecificConfig(std::move(mock)); UseMockAACAudioSpecificConfig(std::move(mock));
EXPECT_FALSE(generator_.PushSample(sample)); EXPECT_FALSE(generator_.PushSample(*sample));
EXPECT_EQ(0u, generator_.NumberOfReadyPesPackets()); EXPECT_EQ(0u, generator_.NumberOfReadyPesPackets());
EXPECT_TRUE(generator_.Flush()); EXPECT_TRUE(generator_.Flush());
} }
@ -369,7 +369,7 @@ TEST_F(PesPacketGeneratorTest, TimeStampScaling) {
UseMockNalUnitToByteStreamConverter(std::move(mock)); UseMockNalUnitToByteStreamConverter(std::move(mock));
EXPECT_TRUE(generator_.PushSample(sample)); EXPECT_TRUE(generator_.PushSample(*sample));
EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets()); EXPECT_EQ(1u, generator_.NumberOfReadyPesPackets());
std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket(); std::unique_ptr<PesPacket> pes_packet = generator_.GetNextPesPacket();
ASSERT_TRUE(pes_packet); ASSERT_TRUE(pes_packet);
@ -32,19 +32,18 @@ Status TsMuxer::Finalize() {
return segmenter_->Finalize(); return segmenter_->Finalize();
} }
Status TsMuxer::AddSample(size_t stream_id, Status TsMuxer::AddSample(size_t stream_id, const MediaSample& sample) {
std::shared_ptr<MediaSample> sample) {
DCHECK_EQ(stream_id, 0u); DCHECK_EQ(stream_id, 0u);
return segmenter_->AddSample(sample); return segmenter_->AddSample(sample);
} }
Status TsMuxer::FinalizeSegment(size_t stream_id, Status TsMuxer::FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> segment_info) { const SegmentInfo& segment_info) {
DCHECK_EQ(stream_id, 0u); DCHECK_EQ(stream_id, 0u);
return segment_info->is_subsegment return segment_info.is_subsegment
? Status::OK ? Status::OK
: segmenter_->FinalizeSegment(segment_info->start_timestamp, : segmenter_->FinalizeSegment(segment_info.start_timestamp,
segment_info->duration); segment_info.duration);
} }
void TsMuxer::FireOnMediaStartEvent() { void TsMuxer::FireOnMediaStartEvent() {
@ -27,9 +27,9 @@ class TsMuxer : public Muxer {
Status InitializeMuxer() override; Status InitializeMuxer() override;
Status Finalize() override; Status Finalize() override;
Status AddSample(size_t stream_id, Status AddSample(size_t stream_id,
std::shared_ptr<MediaSample> sample) override; const MediaSample& sample) override;
Status FinalizeSegment(size_t stream_id, Status FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> sample) override; const SegmentInfo& sample) override;
void FireOnMediaStartEvent(); void FireOnMediaStartEvent();
void FireOnMediaEndEvent(); void FireOnMediaEndEvent();
@ -45,11 +45,11 @@ Status TsSegmenter::Finalize() {
return Status::OK; return Status::OK;
} }
Status TsSegmenter::AddSample(std::shared_ptr<MediaSample> sample) { Status TsSegmenter::AddSample(const MediaSample& sample) {
if (sample->is_encrypted()) if (sample.is_encrypted())
ts_writer_->SignalEncrypted(); ts_writer_->SignalEncrypted();
if (!ts_writer_file_opened_ && !sample->is_key_frame()) if (!ts_writer_file_opened_ && !sample.is_key_frame())
LOG(WARNING) << "A segment will start with a non key frame."; LOG(WARNING) << "A segment will start with a non key frame.";
if (!pes_packet_generator_->PushSample(sample)) { if (!pes_packet_generator_->PushSample(sample)) {
@ -46,7 +46,7 @@ class TsSegmenter {
/// @param sample gets added to this object. /// @param sample gets added to this object.
/// @return OK on success. /// @return OK on success.
Status AddSample(std::shared_ptr<MediaSample> sample); Status AddSample(const MediaSample& sample);
/// Flush all the samples that are (possibly) buffered and write them to the /// Flush all the samples that are (possibly) buffered and write them to the
/// current segment, this will close the file. If a file is not already opened /// current segment, this will close the file. If a file is not already opened
@ -52,7 +52,7 @@ const uint8_t kAnyData[] = {
class MockPesPacketGenerator : public PesPacketGenerator { class MockPesPacketGenerator : public PesPacketGenerator {
public: public:
MOCK_METHOD1(Initialize, bool(const StreamInfo& info)); MOCK_METHOD1(Initialize, bool(const StreamInfo& info));
MOCK_METHOD1(PushSample, bool(std::shared_ptr<MediaSample> sample)); MOCK_METHOD1(PushSample, bool(const MediaSample& sample));
MOCK_METHOD0(NumberOfReadyPesPackets, size_t()); MOCK_METHOD0(NumberOfReadyPesPackets, size_t());
@ -161,7 +161,7 @@ TEST_F(TsSegmenterTest, AddSample) {
std::move(mock_pes_packet_generator_)); std::move(mock_pes_packet_generator_));
EXPECT_OK(segmenter.Initialize(*stream_info)); EXPECT_OK(segmenter.Initialize(*stream_info));
EXPECT_OK(segmenter.AddSample(sample)); EXPECT_OK(segmenter.AddSample(*sample));
} }
// This will add one sample then finalize segment then add another sample. // This will add one sample then finalize segment then add another sample.
@ -257,9 +257,9 @@ TEST_F(TsSegmenterTest, PassedSegmentDuration) {
segmenter.InjectPesPacketGeneratorForTesting( segmenter.InjectPesPacketGeneratorForTesting(
std::move(mock_pes_packet_generator_)); std::move(mock_pes_packet_generator_));
EXPECT_OK(segmenter.Initialize(*stream_info)); EXPECT_OK(segmenter.Initialize(*stream_info));
EXPECT_OK(segmenter.AddSample(sample1)); EXPECT_OK(segmenter.AddSample(*sample1));
EXPECT_OK(segmenter.FinalizeSegment(kFirstPts, sample1->duration())); EXPECT_OK(segmenter.FinalizeSegment(kFirstPts, sample1->duration()));
EXPECT_OK(segmenter.AddSample(sample2)); EXPECT_OK(segmenter.AddSample(*sample2));
} }
// Finalize right after Initialize(). The writer will not be initialized. // Finalize right after Initialize(). The writer will not be initialized.
@ -395,13 +395,13 @@ TEST_F(TsSegmenterTest, EncryptedSample) {
std::move(mock_pes_packet_generator_)); std::move(mock_pes_packet_generator_));
EXPECT_OK(segmenter.Initialize(*stream_info)); EXPECT_OK(segmenter.Initialize(*stream_info));
EXPECT_OK(segmenter.AddSample(sample1)); EXPECT_OK(segmenter.AddSample(*sample1));
EXPECT_OK(segmenter.FinalizeSegment(1, sample1->duration())); EXPECT_OK(segmenter.FinalizeSegment(1, sample1->duration()));
// Signal encrypted if sample is encrypted. // Signal encrypted if sample is encrypted.
EXPECT_CALL(*mock_ts_writer_raw, SignalEncrypted()); EXPECT_CALL(*mock_ts_writer_raw, SignalEncrypted());
sample2->set_is_encrypted(true); sample2->set_is_encrypted(true);
EXPECT_OK(segmenter.AddSample(sample2)); EXPECT_OK(segmenter.AddSample(*sample2));
} }
} // namespace mp2t } // namespace mp2t
@ -44,7 +44,7 @@ void NewSampleEncryptionEntry(const DecryptConfig& decrypt_config,
} // namespace } // namespace
Fragmenter::Fragmenter(std::shared_ptr<StreamInfo> stream_info, Fragmenter::Fragmenter(std::shared_ptr<const StreamInfo> stream_info,
TrackFragment* traf) TrackFragment* traf)
: stream_info_(std::move(stream_info)), : stream_info_(std::move(stream_info)),
use_decoding_timestamp_in_timeline_(false), use_decoding_timestamp_in_timeline_(false),
@ -61,40 +61,39 @@ Fragmenter::Fragmenter(std::shared_ptr<StreamInfo> stream_info,
Fragmenter::~Fragmenter() {} Fragmenter::~Fragmenter() {}
Status Fragmenter::AddSample(std::shared_ptr<MediaSample> sample) { Status Fragmenter::AddSample(const MediaSample& sample) {
DCHECK(sample); if (sample.duration() == 0) {
if (sample->duration() == 0) {
LOG(WARNING) << "Unexpected sample with zero duration @ dts " LOG(WARNING) << "Unexpected sample with zero duration @ dts "
<< sample->dts(); << sample.dts();
} }
if (!fragment_initialized_) { if (!fragment_initialized_) {
Status status = InitializeFragment(sample->dts()); Status status = InitializeFragment(sample.dts());
if (!status.ok()) if (!status.ok())
return status; return status;
} }
if (sample->side_data_size() > 0) if (sample.side_data_size() > 0)
LOG(WARNING) << "MP4 samples do not support side data. Side data ignored."; LOG(WARNING) << "MP4 samples do not support side data. Side data ignored.";
// Fill in sample parameters. It will be optimized later. // Fill in sample parameters. It will be optimized later.
traf_->runs[0].sample_sizes.push_back( traf_->runs[0].sample_sizes.push_back(
static_cast<uint32_t>(sample->data_size())); static_cast<uint32_t>(sample.data_size()));
traf_->runs[0].sample_durations.push_back(sample->duration()); traf_->runs[0].sample_durations.push_back(sample.duration());
traf_->runs[0].sample_flags.push_back( traf_->runs[0].sample_flags.push_back(
sample->is_key_frame() ? 0 : TrackFragmentHeader::kNonKeySampleMask); sample.is_key_frame() ? 0 : TrackFragmentHeader::kNonKeySampleMask);
if (sample->decrypt_config()) { if (sample.decrypt_config()) {
NewSampleEncryptionEntry( NewSampleEncryptionEntry(
*sample->decrypt_config(), *sample.decrypt_config(),
!stream_info_->encryption_config().constant_iv.empty(), traf_); !stream_info_->encryption_config().constant_iv.empty(), traf_);
} }
data_->AppendArray(sample->data(), sample->data_size()); data_->AppendArray(sample.data(), sample.data_size());
fragment_duration_ += sample->duration(); fragment_duration_ += sample.duration();
const int64_t pts = sample->pts(); const int64_t pts = sample.pts();
const int64_t dts = sample->dts(); const int64_t dts = sample.dts();
const int64_t timestamp = use_decoding_timestamp_in_timeline_ ? dts : pts; const int64_t timestamp = use_decoding_timestamp_in_timeline_ ? dts : pts;
// Set |earliest_presentation_time_| to |timestamp| if |timestamp| is smaller // Set |earliest_presentation_time_| to |timestamp| if |timestamp| is smaller
@ -106,7 +105,7 @@ Status Fragmenter::AddSample(std::shared_ptr<MediaSample> sample) {
if (pts != dts) if (pts != dts)
traf_->runs[0].flags |= TrackFragmentRun::kSampleCompTimeOffsetsPresentMask; traf_->runs[0].flags |= TrackFragmentRun::kSampleCompTimeOffsetsPresentMask;
if (sample->is_key_frame()) { if (sample.is_key_frame()) {
if (first_sap_time_ == kInvalidTime) if (first_sap_time_ == kInvalidTime)
first_sap_time_ = pts; first_sap_time_ = pts;
} }
@ -31,14 +31,14 @@ class Fragmenter {
public: public:
/// @param info contains stream information. /// @param info contains stream information.
/// @param traf points to a TrackFragment box. /// @param traf points to a TrackFragment box.
Fragmenter(std::shared_ptr<StreamInfo> info, TrackFragment* traf); Fragmenter(std::shared_ptr<const StreamInfo> info, TrackFragment* traf);
~Fragmenter(); ~Fragmenter();
/// Add a sample to the fragmenter. /// Add a sample to the fragmenter.
/// @param sample points to the sample to be added. /// @param sample points to the sample to be added.
/// @return OK on success, an error status otherwise. /// @return OK on success, an error status otherwise.
Status AddSample(std::shared_ptr<MediaSample> sample); Status AddSample(const MediaSample& sample);
/// Initialize the fragment with default data. /// Initialize the fragment with default data.
/// @param first_sample_dts specifies the decoding timestamp for the first /// @param first_sample_dts specifies the decoding timestamp for the first
@ -86,7 +86,7 @@ class Fragmenter {
// Check if the current fragment starts with SAP. // Check if the current fragment starts with SAP.
bool StartsWithSAP(); bool StartsWithSAP();
std::shared_ptr<StreamInfo> stream_info_; std::shared_ptr<const StreamInfo> stream_info_;
bool use_decoding_timestamp_in_timeline_; bool use_decoding_timestamp_in_timeline_;
TrackFragment* traf_; TrackFragment* traf_;
uint64_t seek_preroll_; uint64_t seek_preroll_;
@ -124,7 +124,7 @@ Status MP4Muxer::InitializeMuxer() {
if (streams()[0]->stream_type() == kStreamVideo) { if (streams()[0]->stream_type() == kStreamVideo) {
codec_fourcc = codec_fourcc =
CodecToFourCC(streams()[0]->codec(), CodecToFourCC(streams()[0]->codec(),
static_cast<VideoStreamInfo*>(streams()[0].get()) static_cast<const VideoStreamInfo*>(streams()[0].get())
->h26x_stream_format()); ->h26x_stream_format());
if (codec_fourcc != FOURCC_NULL) if (codec_fourcc != FOURCC_NULL)
ftyp->compatible_brands.push_back(codec_fourcc); ftyp->compatible_brands.push_back(codec_fourcc);
@ -155,16 +155,22 @@ Status MP4Muxer::InitializeMuxer() {
switch (streams()[i]->stream_type()) { switch (streams()[i]->stream_type()) {
case kStreamVideo: case kStreamVideo:
GenerateVideoTrak(static_cast<VideoStreamInfo*>(streams()[i].get()), GenerateVideoTrak(
&trak, i + 1); static_cast<const VideoStreamInfo*>(streams()[i].get()),
&trak,
i + 1);
break; break;
case kStreamAudio: case kStreamAudio:
GenerateAudioTrak(static_cast<AudioStreamInfo*>(streams()[i].get()), GenerateAudioTrak(
&trak, i + 1); static_cast<const AudioStreamInfo*>(streams()[i].get()),
&trak,
i + 1);
break; break;
case kStreamText: case kStreamText:
GenerateTextTrak(static_cast<TextStreamInfo*>(streams()[i].get()), GenerateTextTrak(
&trak, i + 1); static_cast<const TextStreamInfo*>(streams()[i].get()),
&trak,
i + 1);
break; break;
default: default:
NOTIMPLEMENTED() << "Not implemented for stream type: " NOTIMPLEMENTED() << "Not implemented for stream type: "
@ -210,19 +216,18 @@ Status MP4Muxer::Finalize() {
return Status::OK; return Status::OK;
} }
Status MP4Muxer::AddSample(size_t stream_id, Status MP4Muxer::AddSample(size_t stream_id, const MediaSample& sample) {
std::shared_ptr<MediaSample> sample) {
DCHECK(segmenter_); DCHECK(segmenter_);
return segmenter_->AddSample(stream_id, sample); return segmenter_->AddSample(stream_id, sample);
} }
Status MP4Muxer::FinalizeSegment(size_t stream_id, Status MP4Muxer::FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> segment_info) { const SegmentInfo& segment_info) {
DCHECK(segmenter_); DCHECK(segmenter_);
VLOG(3) << "Finalize " << (segment_info->is_subsegment ? "sub" : "") VLOG(3) << "Finalize " << (segment_info.is_subsegment ? "sub" : "")
<< "segment " << segment_info->start_timestamp << " duration " << "segment " << segment_info.start_timestamp << " duration "
<< segment_info->duration; << segment_info.duration;
return segmenter_->FinalizeSegment(stream_id, std::move(segment_info)); return segmenter_->FinalizeSegment(stream_id, segment_info);
} }
void MP4Muxer::InitializeTrak(const StreamInfo* info, Track* trak) { void MP4Muxer::InitializeTrak(const StreamInfo* info, Track* trak) {
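FinalizeSegment() likewise takes the SegmentInfo by const reference now, so callers hand over a plain value and keep ownership. A hypothetical call-site sketch, using only field names that appear in this diff (segmenter, stream_id, start and duration are assumed locals):

// Hypothetical caller; the segmenter only reads the fields it needs.
SegmentInfo segment_info;
segment_info.is_subsegment = false;
segment_info.start_timestamp = start;  // assumed local
segment_info.duration = duration;      // assumed local
Status status = segmenter->FinalizeSegment(stream_id, segment_info);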
@ -39,10 +39,9 @@ class MP4Muxer : public Muxer {
// Muxer implementation overrides. // Muxer implementation overrides.
Status InitializeMuxer() override; Status InitializeMuxer() override;
Status Finalize() override; Status Finalize() override;
Status AddSample(size_t stream_id, Status AddSample(size_t stream_id, const MediaSample& sample) override;
std::shared_ptr<MediaSample> sample) override;
Status FinalizeSegment(size_t stream_id, Status FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> segment_info) override; const SegmentInfo& segment_info) override;
// Generate Audio/Video Track box. // Generate Audio/Video Track box.
void InitializeTrak(const StreamInfo* info, Track* trak); void InitializeTrak(const StreamInfo* info, Track* trak);
@ -47,7 +47,7 @@ Segmenter::Segmenter(const MuxerOptions& options,
Segmenter::~Segmenter() {} Segmenter::~Segmenter() {}
Status Segmenter::Initialize( Status Segmenter::Initialize(
const std::vector<std::shared_ptr<StreamInfo>>& streams, const std::vector<std::shared_ptr<const StreamInfo>>& streams,
MuxerListener* muxer_listener, MuxerListener* muxer_listener,
ProgressListener* progress_listener) { ProgressListener* progress_listener) {
DCHECK_LT(0u, streams.size()); DCHECK_LT(0u, streams.size());
@ -112,12 +112,11 @@ Status Segmenter::Finalize() {
return DoFinalize(); return DoFinalize();
} }
Status Segmenter::AddSample(size_t stream_id, Status Segmenter::AddSample(size_t stream_id, const MediaSample& sample) {
std::shared_ptr<MediaSample> sample) {
// Set default sample duration if it has not been set yet. // Set default sample duration if it has not been set yet.
if (moov_->extends.tracks[stream_id].default_sample_duration == 0) { if (moov_->extends.tracks[stream_id].default_sample_duration == 0) {
moov_->extends.tracks[stream_id].default_sample_duration = moov_->extends.tracks[stream_id].default_sample_duration =
sample->duration(); sample.duration();
} }
DCHECK_LT(stream_id, fragmenters_.size()); DCHECK_LT(stream_id, fragmenters_.size());
@ -132,17 +131,17 @@ Status Segmenter::AddSample(size_t stream_id,
return status; return status;
if (sample_duration_ == 0) if (sample_duration_ == 0)
sample_duration_ = sample->duration(); sample_duration_ = sample.duration();
stream_durations_[stream_id] += sample->duration(); stream_durations_[stream_id] += sample.duration();
return Status::OK; return Status::OK;
} }
Status Segmenter::FinalizeSegment(size_t stream_id, Status Segmenter::FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> segment_info) { const SegmentInfo& segment_info) {
if (segment_info->key_rotation_encryption_config) { if (segment_info.key_rotation_encryption_config) {
FinalizeFragmentForKeyRotation( FinalizeFragmentForKeyRotation(
stream_id, segment_info->is_encrypted, stream_id, segment_info.is_encrypted,
*segment_info->key_rotation_encryption_config); *segment_info.key_rotation_encryption_config);
} }
DCHECK_LT(stream_id, fragmenters_.size()); DCHECK_LT(stream_id, fragmenters_.size());
@ -200,7 +199,7 @@ Status Segmenter::FinalizeSegment(size_t stream_id,
for (std::unique_ptr<Fragmenter>& fragmenter : fragmenters_) for (std::unique_ptr<Fragmenter>& fragmenter : fragmenters_)
fragmenter->ClearFragmentFinalized(); fragmenter->ClearFragmentFinalized();
if (!segment_info->is_subsegment) { if (!segment_info.is_subsegment) {
Status status = DoFinalizeSegment(); Status status = DoFinalizeSegment();
// Reset segment information to initial state. // Reset segment information to initial state.
sidx_->references.clear(); sidx_->references.clear();
@ -54,7 +54,8 @@ class Segmenter {
/// @param muxer_listener receives muxer events. Can be NULL. /// @param muxer_listener receives muxer events. Can be NULL.
/// @param progress_listener receives progress updates. Can be NULL. /// @param progress_listener receives progress updates. Can be NULL.
/// @return OK on success, an error status otherwise. /// @return OK on success, an error status otherwise.
Status Initialize(const std::vector<std::shared_ptr<StreamInfo>>& streams, Status Initialize(
const std::vector<std::shared_ptr<const StreamInfo>>& streams,
MuxerListener* muxer_listener, MuxerListener* muxer_listener,
ProgressListener* progress_listener); ProgressListener* progress_listener);
@ -66,14 +67,13 @@ class Segmenter {
/// @param stream_id is the zero-based stream index. /// @param stream_id is the zero-based stream index.
/// @param sample points to the sample to be added. /// @param sample points to the sample to be added.
/// @return OK on success, an error status otherwise. /// @return OK on success, an error status otherwise.
Status AddSample(size_t stream_id, std::shared_ptr<MediaSample> sample); Status AddSample(size_t stream_id, const MediaSample& sample);
/// Finalize the segment / subsegment. /// Finalize the segment / subsegment.
/// @param stream_id is the zero-based stream index. /// @param stream_id is the zero-based stream index.
/// @param is_subsegment indicates if it is a subsegment (fragment). /// @param is_subsegment indicates if it is a subsegment (fragment).
/// @return OK on success, an error status otherwise. /// @return OK on success, an error status otherwise.
Status FinalizeSegment(size_t stream_id, Status FinalizeSegment(size_t stream_id, const SegmentInfo& segment_info);
std::shared_ptr<SegmentInfo> segment_info);
// TODO(rkuroiwa): Change these Get*Range() methods to return // TODO(rkuroiwa): Change these Get*Range() methods to return
// base::Optional<Range> as well. // base::Optional<Range> as well.
@ -209,7 +209,7 @@ class EncryptedSegmenterTest : public SegmentTestBase {
void InitializeSegmenter(const MuxerOptions& options) { void InitializeSegmenter(const MuxerOptions& options) {
ASSERT_NO_FATAL_FAILURE( ASSERT_NO_FATAL_FAILURE(
CreateAndInitializeSegmenter<webm::TwoPassSingleSegmentSegmenter>( CreateAndInitializeSegmenter<webm::TwoPassSingleSegmentSegmenter>(
options, info_.get(), &segmenter_)); options, *info_, &segmenter_));
} }
std::shared_ptr<StreamInfo> info_; std::shared_ptr<StreamInfo> info_;
@ -236,7 +236,7 @@ TEST_F(EncryptedSegmenterTest, BasicSupport) {
std::vector<SubsampleEntry>())); std::vector<SubsampleEntry>()));
sample->set_decrypt_config(std::move(decrypt_config)); sample->set_decrypt_config(std::move(decrypt_config));
} }
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK( ASSERT_OK(
segmenter_->FinalizeSegment(3 * kDuration, 2 * kDuration, !kSubsegment)); segmenter_->FinalizeSegment(3 * kDuration, 2 * kDuration, !kSubsegment));
@ -103,7 +103,7 @@ class MultiSegmentSegmenterTest : public SegmentTestBase {
void InitializeSegmenter(const MuxerOptions& options) { void InitializeSegmenter(const MuxerOptions& options) {
ASSERT_NO_FATAL_FAILURE( ASSERT_NO_FATAL_FAILURE(
CreateAndInitializeSegmenter<webm::MultiSegmentSegmenter>( CreateAndInitializeSegmenter<webm::MultiSegmentSegmenter>(
options, info_.get(), &segmenter_)); options, *info_, &segmenter_));
} }
std::string TemplateFileName(int number) const { std::string TemplateFileName(int number) const {
@ -124,7 +124,7 @@ TEST_F(MultiSegmentSegmenterTest, BasicSupport) {
for (int i = 0; i < 5; i++) { for (int i = 0; i < 5; i++) {
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, kNoSideData); CreateSample(kKeyFrame, kDuration, kNoSideData);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK(segmenter_->FinalizeSegment(0, 8 * kDuration, !kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 8 * kDuration, !kSubsegment));
ASSERT_OK(segmenter_->Finalize()); ASSERT_OK(segmenter_->Finalize());
@ -148,7 +148,7 @@ TEST_F(MultiSegmentSegmenterTest, SplitsFilesOnSegment) {
ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, !kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, !kSubsegment));
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, kNoSideData); CreateSample(kKeyFrame, kDuration, kNoSideData);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK( ASSERT_OK(
segmenter_->FinalizeSegment(5 * kDuration, 8 * kDuration, !kSubsegment)); segmenter_->FinalizeSegment(5 * kDuration, 8 * kDuration, !kSubsegment));
@ -178,7 +178,7 @@ TEST_F(MultiSegmentSegmenterTest, SplitsClustersOnSubsegment) {
ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, kSubsegment));
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, kNoSideData); CreateSample(kKeyFrame, kDuration, kNoSideData);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK(segmenter_->FinalizeSegment(0, 8 * kDuration, !kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 8 * kDuration, !kSubsegment));
ASSERT_OK(segmenter_->Finalize()); ASSERT_OK(segmenter_->Finalize());
@ -75,14 +75,17 @@ Segmenter::Segmenter(const MuxerOptions& options) : options_(options) {}
Segmenter::~Segmenter() {} Segmenter::~Segmenter() {}
Status Segmenter::Initialize(StreamInfo* info, Status Segmenter::Initialize(const StreamInfo& info,
ProgressListener* progress_listener, ProgressListener* progress_listener,
MuxerListener* muxer_listener) { MuxerListener* muxer_listener) {
is_encrypted_ = info.is_encrypted();
duration_ = info.duration();
time_scale_ = info.time_scale();
muxer_listener_ = muxer_listener; muxer_listener_ = muxer_listener;
info_ = info;
// Use media duration as progress target. // Use media duration as progress target.
progress_target_ = info_->duration(); progress_target_ = info.duration();
progress_listener_ = progress_listener; progress_listener_ = progress_listener;
segment_info_.Init(); segment_info_.Init();
@ -106,39 +109,39 @@ Status Segmenter::Initialize(StreamInfo* info,
unsigned int seed = 0; unsigned int seed = 0;
std::unique_ptr<mkvmuxer::Track> track; std::unique_ptr<mkvmuxer::Track> track;
Status status; Status status;
switch (info_->stream_type()) { switch (info.stream_type()) {
case kStreamVideo: { case kStreamVideo: {
std::unique_ptr<VideoTrack> video_track(new VideoTrack(&seed)); std::unique_ptr<VideoTrack> video_track(new VideoTrack(&seed));
status = InitializeVideoTrack(static_cast<VideoStreamInfo*>(info_), status = InitializeVideoTrack(static_cast<const VideoStreamInfo&>(info),
video_track.get()); video_track.get());
track = std::move(video_track); track = std::move(video_track);
break; break;
} }
case kStreamAudio: { case kStreamAudio: {
std::unique_ptr<AudioTrack> audio_track(new AudioTrack(&seed)); std::unique_ptr<AudioTrack> audio_track(new AudioTrack(&seed));
status = InitializeAudioTrack(static_cast<AudioStreamInfo*>(info_), status = InitializeAudioTrack(static_cast<const AudioStreamInfo&>(info),
audio_track.get()); audio_track.get());
track = std::move(audio_track); track = std::move(audio_track);
break; break;
} }
default: default:
NOTIMPLEMENTED() << "Not implemented for stream type: " NOTIMPLEMENTED() << "Not implemented for stream type: "
<< info_->stream_type(); << info.stream_type();
status = Status(error::UNIMPLEMENTED, "Not implemented for stream type"); status = Status(error::UNIMPLEMENTED, "Not implemented for stream type");
} }
if (!status.ok()) if (!status.ok())
return status; return status;
if (info_->is_encrypted()) { if (info.is_encrypted()) {
if (info->encryption_config().per_sample_iv_size != kWebMIvSize) if (info.encryption_config().per_sample_iv_size != kWebMIvSize)
return Status(error::MUXER_FAILURE, "Incorrect size WebM encryption IV."); return Status(error::MUXER_FAILURE, "Incorrect size WebM encryption IV.");
status = UpdateTrackForEncryption(info_->encryption_config().key_id, status = UpdateTrackForEncryption(info.encryption_config().key_id,
track.get()); track.get());
if (!status.ok()) if (!status.ok())
return status; return status;
} }
tracks_.AddTrack(track.get(), info_->track_id()); tracks_.AddTrack(track.get(), info.track_id());
// number() is only available after the above instruction. // number() is only available after the above instruction.
track_id_ = track->number(); track_id_ = track->number();
// |tracks_| owns |track|. // |tracks_| owns |track|.
@ -153,7 +156,9 @@ Status Segmenter::Finalize() {
return DoFinalize(); return DoFinalize();
} }
Status Segmenter::AddSample(std::shared_ptr<MediaSample> sample) { Status Segmenter::AddSample(const MediaSample& source_sample) {
std::shared_ptr<MediaSample> sample = MediaSample::CopyFrom(source_sample);
if (sample_duration_ == 0) { if (sample_duration_ == 0) {
first_timestamp_ = sample->pts(); first_timestamp_ = sample->pts();
sample_duration_ = sample->duration(); sample_duration_ = sample->duration();
@ -178,7 +183,7 @@ Status Segmenter::AddSample(std::shared_ptr<MediaSample> sample) {
if (!status.ok()) if (!status.ok())
return status; return status;
if (info_->is_encrypted()) if (is_encrypted_)
UpdateFrameForEncryption(sample.get()); UpdateFrameForEncryption(sample.get());
new_subsegment_ = false; new_subsegment_ = false;
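The WebM segmenter is the one consumer that still needs to mutate samples (for frame encryption), so it now copies the incoming const sample with MediaSample::CopyFrom() and edits only the copy. A small sketch of that copy-then-mutate pattern (the function name is hypothetical; CopyFrom() and set_duration() are taken from this diff):

// Hypothetical illustration: |source| stays untouched, only the copy changes.
std::shared_ptr<MediaSample> MakeWritableCopy(const MediaSample& source) {
  std::shared_ptr<MediaSample> copy = MediaSample::CopyFrom(source);
  copy->set_duration(source.duration());  // mutate the private copy only
  return copy;
}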
@ -205,14 +210,14 @@ float Segmenter::GetDurationInSeconds() const {
uint64_t Segmenter::FromBmffTimestamp(uint64_t bmff_timestamp) { uint64_t Segmenter::FromBmffTimestamp(uint64_t bmff_timestamp) {
return NsToWebMTimecode( return NsToWebMTimecode(
BmffTimestampToNs(bmff_timestamp, info_->time_scale()), BmffTimestampToNs(bmff_timestamp, time_scale_),
segment_info_.timecode_scale()); segment_info_.timecode_scale());
} }
uint64_t Segmenter::FromWebMTimecode(uint64_t webm_timecode) { uint64_t Segmenter::FromWebMTimecode(uint64_t webm_timecode) {
return NsToBmffTimestamp( return NsToBmffTimestamp(
WebMTimecodeToNs(webm_timecode, segment_info_.timecode_scale()), WebMTimecodeToNs(webm_timecode, segment_info_.timecode_scale()),
info_->time_scale()); time_scale_);
} }
Status Segmenter::WriteSegmentHeader(uint64_t file_size, MkvWriter* writer) { Status Segmenter::WriteSegmentHeader(uint64_t file_size, MkvWriter* writer) {
@ -277,17 +282,17 @@ void Segmenter::UpdateProgress(uint64_t progress) {
} }
} }
Status Segmenter::InitializeVideoTrack(const VideoStreamInfo* info, Status Segmenter::InitializeVideoTrack(const VideoStreamInfo& info,
VideoTrack* track) { VideoTrack* track) {
if (info->codec() == kCodecVP8) { if (info.codec() == kCodecVP8) {
track->set_codec_id(mkvmuxer::Tracks::kVp8CodecId); track->set_codec_id(mkvmuxer::Tracks::kVp8CodecId);
} else if (info->codec() == kCodecVP9) { } else if (info.codec() == kCodecVP9) {
track->set_codec_id(mkvmuxer::Tracks::kVp9CodecId); track->set_codec_id(mkvmuxer::Tracks::kVp9CodecId);
// The |StreamInfo::codec_config| field is stored using the MP4 format; we // The |StreamInfo::codec_config| field is stored using the MP4 format; we
// need to convert it to the WebM format. // need to convert it to the WebM format.
VPCodecConfigurationRecord vp_config; VPCodecConfigurationRecord vp_config;
if (!vp_config.ParseMP4(info->codec_config())) { if (!vp_config.ParseMP4(info.codec_config())) {
return Status(error::INTERNAL_ERROR, return Status(error::INTERNAL_ERROR,
"Unable to parse VP9 codec configuration"); "Unable to parse VP9 codec configuration");
} }
@ -319,43 +324,43 @@ Status Segmenter::InitializeVideoTrack(const VideoStreamInfo* info,
"Only VP8 and VP9 video codecs are supported in WebM."); "Only VP8 and VP9 video codecs are supported in WebM.");
} }
track->set_uid(info->track_id()); track->set_uid(info.track_id());
if (!info->language().empty()) if (!info.language().empty())
track->set_language(info->language().c_str()); track->set_language(info.language().c_str());
track->set_type(mkvmuxer::Tracks::kVideo); track->set_type(mkvmuxer::Tracks::kVideo);
track->set_width(info->width()); track->set_width(info.width());
track->set_height(info->height()); track->set_height(info.height());
track->set_display_height(info->height()); track->set_display_height(info.height());
track->set_display_width(info->width() * info->pixel_width() / track->set_display_width(info.width() * info.pixel_width() /
info->pixel_height()); info.pixel_height());
return Status::OK; return Status::OK;
} }
Status Segmenter::InitializeAudioTrack(const AudioStreamInfo* info, Status Segmenter::InitializeAudioTrack(const AudioStreamInfo& info,
AudioTrack* track) { AudioTrack* track) {
if (info->codec() == kCodecOpus) { if (info.codec() == kCodecOpus) {
track->set_codec_id(mkvmuxer::Tracks::kOpusCodecId); track->set_codec_id(mkvmuxer::Tracks::kOpusCodecId);
} else if (info->codec() == kCodecVorbis) { } else if (info.codec() == kCodecVorbis) {
track->set_codec_id(mkvmuxer::Tracks::kVorbisCodecId); track->set_codec_id(mkvmuxer::Tracks::kVorbisCodecId);
} else { } else {
LOG(ERROR) << "Only Vorbis and Opus audio codec are supported in WebM."; LOG(ERROR) << "Only Vorbis and Opus audio codec are supported in WebM.";
return Status(error::UNIMPLEMENTED, return Status(error::UNIMPLEMENTED,
"Only Vorbis and Opus audio codecs are supported in WebM."); "Only Vorbis and Opus audio codecs are supported in WebM.");
} }
if (!track->SetCodecPrivate(info->codec_config().data(), if (!track->SetCodecPrivate(info.codec_config().data(),
info->codec_config().size())) { info.codec_config().size())) {
return Status(error::INTERNAL_ERROR, return Status(error::INTERNAL_ERROR,
"Private codec data required for audio streams"); "Private codec data required for audio streams");
} }
track->set_uid(info->track_id()); track->set_uid(info.track_id());
if (!info->language().empty()) if (!info.language().empty())
track->set_language(info->language().c_str()); track->set_language(info.language().c_str());
track->set_type(mkvmuxer::Tracks::kAudio); track->set_type(mkvmuxer::Tracks::kAudio);
track->set_sample_rate(info->sampling_frequency()); track->set_sample_rate(info.sampling_frequency());
track->set_channels(info->num_channels()); track->set_channels(info.num_channels());
track->set_seek_pre_roll(info->seek_preroll_ns()); track->set_seek_pre_roll(info.seek_preroll_ns());
track->set_codec_delay(info->codec_delay_ns()); track->set_codec_delay(info.codec_delay_ns());
return Status::OK; return Status::OK;
} }
@ -372,11 +377,11 @@ Status Segmenter::WriteFrame(bool write_duration) {
if (write_duration) { if (write_duration) {
frame.set_duration( frame.set_duration(
BmffTimestampToNs(prev_sample_->duration(), info_->time_scale())); BmffTimestampToNs(prev_sample_->duration(), time_scale_));
} }
frame.set_is_key(prev_sample_->is_key_frame()); frame.set_is_key(prev_sample_->is_key_frame());
frame.set_timestamp( frame.set_timestamp(
BmffTimestampToNs(prev_sample_->pts(), info_->time_scale())); BmffTimestampToNs(prev_sample_->pts(), time_scale_));
frame.set_track_number(track_id_); frame.set_track_number(track_id_);
if (prev_sample_->side_data_size() > 0) { if (prev_sample_->side_data_size() > 0) {
@ -397,7 +402,7 @@ Status Segmenter::WriteFrame(bool write_duration) {
if (!prev_sample_->is_key_frame() && !frame.CanBeSimpleBlock()) { if (!prev_sample_->is_key_frame() && !frame.CanBeSimpleBlock()) {
frame.set_reference_block_timestamp( frame.set_reference_block_timestamp(
BmffTimestampToNs(reference_frame_timestamp_, info_->time_scale())); BmffTimestampToNs(reference_frame_timestamp_, time_scale_));
} }
// GetRelativeTimecode will return -1 if the relative timecode is too large // GetRelativeTimecode will return -1 if the relative timecode is too large
@ -41,7 +41,7 @@ class Segmenter {
/// @param info The stream info for the stream being segmented. /// @param info The stream info for the stream being segmented.
/// @param muxer_listener receives muxer events. Can be NULL. /// @param muxer_listener receives muxer events. Can be NULL.
/// @return OK on success, an error status otherwise. /// @return OK on success, an error status otherwise.
Status Initialize(StreamInfo* info, Status Initialize(const StreamInfo& info,
ProgressListener* progress_listener, ProgressListener* progress_listener,
MuxerListener* muxer_listener); MuxerListener* muxer_listener);
@ -52,7 +52,7 @@ class Segmenter {
/// Add sample to the indicated stream. /// Add sample to the indicated stream.
/// @param sample points to the sample to be added. /// @param sample points to the sample to be added.
/// @return OK on success, an error status otherwise. /// @return OK on success, an error status otherwise.
Status AddSample(std::shared_ptr<MediaSample> sample); Status AddSample(const MediaSample& sample);
/// Finalize the (sub)segment. /// Finalize the (sub)segment.
virtual Status FinalizeSegment(uint64_t start_timestamp, virtual Status FinalizeSegment(uint64_t start_timestamp,
@ -95,19 +95,20 @@ class Segmenter {
mkvmuxer::Cluster* cluster() { return cluster_.get(); } mkvmuxer::Cluster* cluster() { return cluster_.get(); }
mkvmuxer::Cues* cues() { return &cues_; } mkvmuxer::Cues* cues() { return &cues_; }
MuxerListener* muxer_listener() { return muxer_listener_; } MuxerListener* muxer_listener() { return muxer_listener_; }
StreamInfo* info() { return info_; }
SeekHead* seek_head() { return &seek_head_; } SeekHead* seek_head() { return &seek_head_; }
int track_id() const { return track_id_; } int track_id() const { return track_id_; }
uint64_t segment_payload_pos() const { return segment_payload_pos_; } uint64_t segment_payload_pos() const { return segment_payload_pos_; }
uint64_t duration() const { return duration_; }
virtual Status DoInitialize() = 0; virtual Status DoInitialize() = 0;
virtual Status DoFinalize() = 0; virtual Status DoFinalize() = 0;
private: private:
Status InitializeAudioTrack(const AudioStreamInfo* info, Status InitializeAudioTrack(const AudioStreamInfo& info,
mkvmuxer::AudioTrack* track); mkvmuxer::AudioTrack* track);
Status InitializeVideoTrack(const VideoStreamInfo* info, Status InitializeVideoTrack(const VideoStreamInfo& info,
mkvmuxer::VideoTrack* track); mkvmuxer::VideoTrack* track);
// Writes the previous frame to the file. // Writes the previous frame to the file.
@ -120,7 +121,7 @@ class Segmenter {
virtual Status NewSegment(uint64_t start_timestamp, bool is_subsegment) = 0; virtual Status NewSegment(uint64_t start_timestamp, bool is_subsegment) = 0;
// Store the previous sample so we know which one is the last frame. // Store the previous sample so we know which one is the last frame.
std::shared_ptr<MediaSample> prev_sample_; std::shared_ptr<const MediaSample> prev_sample_;
// The reference frame timestamp; used to populate the ReferenceBlock element // The reference frame timestamp; used to populate the ReferenceBlock element
// when writing non-keyframe BlockGroups. // when writing non-keyframe BlockGroups.
uint64_t reference_frame_timestamp_ = 0; uint64_t reference_frame_timestamp_ = 0;
@ -133,7 +134,6 @@ class Segmenter {
mkvmuxer::SegmentInfo segment_info_; mkvmuxer::SegmentInfo segment_info_;
mkvmuxer::Tracks tracks_; mkvmuxer::Tracks tracks_;
StreamInfo* info_ = nullptr;
MuxerListener* muxer_listener_ = nullptr; MuxerListener* muxer_listener_ = nullptr;
ProgressListener* progress_listener_ = nullptr; ProgressListener* progress_listener_ = nullptr;
uint64_t progress_target_ = 0; uint64_t progress_target_ = 0;
@ -151,6 +151,11 @@ class Segmenter {
bool new_subsegment_ = false; bool new_subsegment_ = false;
int track_id_ = 0; int track_id_ = 0;
// The subset of information that we need from StreamInfo
bool is_encrypted_ = false;
uint64_t time_scale_ = 0;
uint64_t duration_ = 0;
DISALLOW_COPY_AND_ASSIGN(Segmenter); DISALLOW_COPY_AND_ASSIGN(Segmenter);
}; };
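Since the segmenter no longer holds a StreamInfo* member, the scalar values it needs later (encryption flag, time scale, duration) are copied out of the const reference during Initialize(). A self-contained sketch of that pattern with illustrative names (not packager classes):

#include <cstdint>

// Illustrative only: cache the scalars instead of retaining a pointer to a
// shared, read-only info object.
struct TrackInfo {
  bool is_encrypted = false;
  uint64_t time_scale = 0;
  uint64_t duration = 0;
};

class ExampleSegmenter {
 public:
  void Initialize(const TrackInfo& info) {
    is_encrypted_ = info.is_encrypted;
    time_scale_ = info.time_scale;
    duration_ = info.duration;
  }

 private:
  bool is_encrypted_ = false;
  uint64_t time_scale_ = 0;
  uint64_t duration_ = 0;
};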
@ -46,7 +46,7 @@ class SegmentTestBase : public ::testing::Test {
template <typename S> template <typename S>
void CreateAndInitializeSegmenter( void CreateAndInitializeSegmenter(
const MuxerOptions& options, const MuxerOptions& options,
StreamInfo* info, const StreamInfo& info,
std::unique_ptr<webm::Segmenter>* result) const { std::unique_ptr<webm::Segmenter>* result) const {
std::unique_ptr<S> segmenter(new S(options)); std::unique_ptr<S> segmenter(new S(options));
@ -144,7 +144,7 @@ class SingleSegmentSegmenterTest : public SegmentTestBase {
void InitializeSegmenter(const MuxerOptions& options) { void InitializeSegmenter(const MuxerOptions& options) {
ASSERT_NO_FATAL_FAILURE( ASSERT_NO_FATAL_FAILURE(
CreateAndInitializeSegmenter<webm::TwoPassSingleSegmentSegmenter>( CreateAndInitializeSegmenter<webm::TwoPassSingleSegmentSegmenter>(
options, info_.get(), &segmenter_)); options, *info_, &segmenter_));
} }
std::shared_ptr<StreamInfo> info_; std::shared_ptr<StreamInfo> info_;
@ -161,7 +161,7 @@ TEST_F(SingleSegmentSegmenterTest, BasicSupport) {
i == 3 ? kGenerateSideData : kNoSideData; i == 3 ? kGenerateSideData : kNoSideData;
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, side_data_flag); CreateSample(kKeyFrame, kDuration, side_data_flag);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, !kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, !kSubsegment));
ASSERT_OK(segmenter_->Finalize()); ASSERT_OK(segmenter_->Finalize());
@ -179,7 +179,7 @@ TEST_F(SingleSegmentSegmenterTest, SplitsClustersOnSegment) {
ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, !kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, !kSubsegment));
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, kNoSideData); CreateSample(kKeyFrame, kDuration, kNoSideData);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK( ASSERT_OK(
segmenter_->FinalizeSegment(5 * kDuration, 8 * kDuration, !kSubsegment)); segmenter_->FinalizeSegment(5 * kDuration, 8 * kDuration, !kSubsegment));
@ -203,7 +203,7 @@ TEST_F(SingleSegmentSegmenterTest, IgnoresSubsegment) {
ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 5 * kDuration, kSubsegment));
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, kNoSideData); CreateSample(kKeyFrame, kDuration, kNoSideData);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK(segmenter_->FinalizeSegment(0, 8 * kDuration, !kSubsegment)); ASSERT_OK(segmenter_->FinalizeSegment(0, 8 * kDuration, !kSubsegment));
ASSERT_OK(segmenter_->Finalize()); ASSERT_OK(segmenter_->Finalize());
@ -229,7 +229,7 @@ TEST_F(SingleSegmentSegmenterTest, LargeTimestamp) {
i == 3 ? kGenerateSideData : kNoSideData; i == 3 ? kGenerateSideData : kNoSideData;
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, side_data_flag); CreateSample(kKeyFrame, kDuration, side_data_flag);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK(segmenter_->FinalizeSegment(kLargeTimestamp, 5 * kDuration, ASSERT_OK(segmenter_->FinalizeSegment(kLargeTimestamp, 5 * kDuration,
!kSubsegment)); !kSubsegment));
@ -265,7 +265,7 @@ TEST_F(SingleSegmentSegmenterTest, ReallyLargeTimestamp) {
i == 3 ? kGenerateSideData : kNoSideData; i == 3 ? kGenerateSideData : kNoSideData;
std::shared_ptr<MediaSample> sample = std::shared_ptr<MediaSample> sample =
CreateSample(kKeyFrame, kDuration, side_data_flag); CreateSample(kKeyFrame, kDuration, side_data_flag);
ASSERT_OK(segmenter_->AddSample(sample)); ASSERT_OK(segmenter_->AddSample(*sample));
} }
ASSERT_OK(segmenter_->FinalizeSegment(kReallyLargeTimestamp, 5 * kDuration, ASSERT_OK(segmenter_->FinalizeSegment(kReallyLargeTimestamp, 5 * kDuration,
!kSubsegment)); !kSubsegment));
@ -71,7 +71,7 @@ TwoPassSingleSegmentSegmenter::~TwoPassSingleSegmentSegmenter() {}
Status TwoPassSingleSegmentSegmenter::DoInitialize() { Status TwoPassSingleSegmentSegmenter::DoInitialize() {
// Assume the amount of time to copy the temp file as the same amount // Assume the amount of time to copy the temp file as the same amount
// of time as to make it. // of time as to make it.
set_progress_target(info()->duration() * 2); set_progress_target(duration() * 2);
if (!TempFilePath(options().temp_dir, &temp_file_name_)) if (!TempFilePath(options().temp_dir, &temp_file_name_))
return Status(error::FILE_FAILURE, "Unable to create temporary file."); return Status(error::FILE_FAILURE, "Unable to create temporary file.");
@ -38,7 +38,7 @@ Status WebMMuxer::InitializeMuxer() {
} }
Status initialized = segmenter_->Initialize( Status initialized = segmenter_->Initialize(
streams()[0].get(), progress_listener(), muxer_listener()); *streams()[0], progress_listener(), muxer_listener());
if (!initialized.ok()) if (!initialized.ok())
return initialized; return initialized;
@ -58,26 +58,25 @@ Status WebMMuxer::Finalize() {
return Status::OK; return Status::OK;
} }
Status WebMMuxer::AddSample(size_t stream_id, Status WebMMuxer::AddSample(size_t stream_id, const MediaSample& sample) {
std::shared_ptr<MediaSample> sample) {
DCHECK(segmenter_); DCHECK(segmenter_);
DCHECK_EQ(stream_id, 0u); DCHECK_EQ(stream_id, 0u);
return segmenter_->AddSample(sample); return segmenter_->AddSample(sample);
} }
Status WebMMuxer::FinalizeSegment(size_t stream_id, Status WebMMuxer::FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> segment_info) { const SegmentInfo& segment_info) {
DCHECK(segmenter_); DCHECK(segmenter_);
DCHECK_EQ(stream_id, 0u); DCHECK_EQ(stream_id, 0u);
if (segment_info->key_rotation_encryption_config) { if (segment_info.key_rotation_encryption_config) {
NOTIMPLEMENTED() << "Key rotation is not implemented for WebM."; NOTIMPLEMENTED() << "Key rotation is not implemented for WebM.";
return Status(error::UNIMPLEMENTED, return Status(error::UNIMPLEMENTED,
"Key rotation is not implemented for WebM"); "Key rotation is not implemented for WebM");
} }
return segmenter_->FinalizeSegment(segment_info->start_timestamp, return segmenter_->FinalizeSegment(segment_info.start_timestamp,
segment_info->duration, segment_info.duration,
segment_info->is_subsegment); segment_info.is_subsegment);
} }
void WebMMuxer::FireOnMediaStartEvent() { void WebMMuxer::FireOnMediaStartEvent() {

View File

@ -26,10 +26,9 @@ class WebMMuxer : public Muxer {
// Muxer implementation overrides. // Muxer implementation overrides.
Status InitializeMuxer() override; Status InitializeMuxer() override;
Status Finalize() override; Status Finalize() override;
Status AddSample(size_t stream_id, Status AddSample(size_t stream_id, const MediaSample& sample) override;
std::shared_ptr<MediaSample> sample) override;
Status FinalizeSegment(size_t stream_id, Status FinalizeSegment(size_t stream_id,
std::shared_ptr<SegmentInfo> segment_info) override; const SegmentInfo& segment_info) override;
void FireOnMediaStartEvent(); void FireOnMediaStartEvent();
void FireOnMediaEndEvent(); void FireOnMediaEndEvent();
@ -49,27 +49,29 @@ Status TrickPlayHandler::InitializeInternal() {
return Status::OK; return Status::OK;
} }
Status TrickPlayHandler::Process( Status TrickPlayHandler::Process(std::unique_ptr<StreamData> stream_data) {
std::unique_ptr<StreamData> input_stream_data) {
// The non-trick play stream is dispatched at index 0. // The non-trick play stream is dispatched at index 0.
// The trick-play streams are dispatched to index 1, index 2 and so on. // The trick-play streams are dispatched to index 1, index 2 and so on.
DCHECK_EQ(input_stream_data->stream_index, 0u); DCHECK(stream_data);
std::unique_ptr<StreamData> output_stream_data(new StreamData()); DCHECK_EQ(stream_data->stream_index, 0u);
*output_stream_data = *input_stream_data;
Status status = Dispatch(std::move(output_stream_data)); std::unique_ptr<StreamData> copy(new StreamData);
*copy = *stream_data;
Status status = Dispatch(std::move(copy));
if (!status.ok()) { if (!status.ok()) {
return status; return status;
} }
std::shared_ptr<StreamData> stream_data(std::move(input_stream_data)); std::shared_ptr<StreamData> shared_stream_data(std::move(stream_data));
if (stream_data->stream_data_type == StreamDataType::kStreamInfo) {
if (stream_data->stream_info->stream_type() != kStreamVideo) { if (shared_stream_data->stream_data_type == StreamDataType::kStreamInfo) {
if (shared_stream_data->stream_info->stream_type() != kStreamVideo) {
status.SetError(error::TRICK_PLAY_ERROR, status.SetError(error::TRICK_PLAY_ERROR,
"Trick play does not support non-video stream"); "Trick play does not support non-video stream");
return status; return status;
} }
const VideoStreamInfo& video_stream_info = const VideoStreamInfo& video_stream_info =
static_cast<const VideoStreamInfo&>(*stream_data->stream_info); static_cast<const VideoStreamInfo&>(*shared_stream_data->stream_info);
if (video_stream_info.trick_play_factor() > 0) { if (video_stream_info.trick_play_factor() > 0) {
status.SetError(error::TRICK_PLAY_ERROR, status.SetError(error::TRICK_PLAY_ERROR,
"This stream is alreay a trick play stream."); "This stream is alreay a trick play stream.");
@ -77,28 +79,28 @@ Status TrickPlayHandler::Process(
} }
} }
if (stream_data->stream_data_type == StreamDataType::kSegmentInfo) { if (shared_stream_data->stream_data_type == StreamDataType::kSegmentInfo) {
for (auto& cached_data : cached_stream_data_) { for (auto& cached_data : cached_stream_data_) {
// It is possible that trick play stream has large frame duration that // It is possible that trick play stream has large frame duration that
// some segments in the main stream are skipped. To avoid empty segments, // some segments in the main stream are skipped. To avoid empty segments,
// only cache SegmentInfo with MediaSample before it. // only cache SegmentInfo with MediaSample before it.
if (!cached_data.empty() && if (!cached_data.empty() &&
cached_data.back()->stream_data_type == StreamDataType::kMediaSample) cached_data.back()->stream_data_type == StreamDataType::kMediaSample)
cached_data.push_back(stream_data); cached_data.push_back(shared_stream_data);
} }
return Status::OK; return Status::OK;
} }
if (stream_data->stream_data_type != StreamDataType::kMediaSample) { if (shared_stream_data->stream_data_type != StreamDataType::kMediaSample) {
// Non media sample stream data needs to be dispatched to every output // Non media sample stream data needs to be dispatched to every output
// stream. It is just cached in every queue until a new key frame comes or // stream. It is just cached in every queue until a new key frame comes or
// the stream is flushed. // the stream is flushed.
for (size_t i = 0; i < cached_stream_data_.size(); ++i) for (size_t i = 0; i < cached_stream_data_.size(); ++i)
cached_stream_data_[i].push_back(stream_data); cached_stream_data_[i].push_back(shared_stream_data);
return Status::OK; return Status::OK;
} }
if (stream_data->media_sample->is_key_frame()) { if (shared_stream_data->media_sample->is_key_frame()) {
// For a new key frame, some of the trick play streams may include it. // For a new key frame, some of the trick play streams may include it.
// The cached data in those trick play streams will be processed. // The cached data in those trick play streams will be processed.
DCHECK_EQ(trick_play_factors_.size(), cached_stream_data_.size()); DCHECK_EQ(trick_play_factors_.size(), cached_stream_data_.size());
@ -118,7 +120,7 @@ Status TrickPlayHandler::Process(
if (!status.ok()) if (!status.ok())
return status; return status;
} }
cached_stream_data_[i].push_back(stream_data); cached_stream_data_[i].push_back(shared_stream_data);
} }
} }
@ -126,8 +128,8 @@ Status TrickPlayHandler::Process(
} }
total_frames_++; total_frames_++;
prev_sample_end_timestamp_ = prev_sample_end_timestamp_ = shared_stream_data->media_sample->dts() +
stream_data->media_sample->dts() + stream_data->media_sample->duration(); shared_stream_data->media_sample->duration();
return Status::OK; return Status::OK;
} }
@ -166,7 +168,7 @@ Status TrickPlayHandler::ProcessCachedStreamData(
std::deque<std::shared_ptr<StreamData>>* cached_stream_data) { std::deque<std::shared_ptr<StreamData>>* cached_stream_data) {
while (!cached_stream_data->empty()) { while (!cached_stream_data->empty()) {
Status status = Status status =
ProcessOneStreamData(output_stream_index, cached_stream_data->front()); ProcessOneStreamData(output_stream_index, *cached_stream_data->front());
if (!status.ok()) { if (!status.ok()) {
return status; return status;
} }
@ -175,17 +177,16 @@ Status TrickPlayHandler::ProcessCachedStreamData(
return Status::OK; return Status::OK;
} }
Status TrickPlayHandler::ProcessOneStreamData( Status TrickPlayHandler::ProcessOneStreamData(size_t output_stream_index,
size_t output_stream_index, const StreamData& stream_data) {
const std::shared_ptr<StreamData>& stream_data) {
size_t trick_play_index = output_stream_index - 1; size_t trick_play_index = output_stream_index - 1;
uint32_t trick_play_factor = trick_play_factors_[trick_play_index]; uint32_t trick_play_factor = trick_play_factors_[trick_play_index];
Status status; Status status;
switch (stream_data->stream_data_type) { switch (stream_data.stream_data_type) {
// trick_play_factor in StreamInfo should be modified. // trick_play_factor in StreamInfo should be modified.
case StreamDataType::kStreamInfo: { case StreamDataType::kStreamInfo: {
const VideoStreamInfo& video_stream_info = const VideoStreamInfo& video_stream_info =
static_cast<const VideoStreamInfo&>(*stream_data->stream_info); static_cast<const VideoStreamInfo&>(*stream_data.stream_info);
std::shared_ptr<VideoStreamInfo> trick_play_video_stream_info( std::shared_ptr<VideoStreamInfo> trick_play_video_stream_info(
new VideoStreamInfo(video_stream_info)); new VideoStreamInfo(video_stream_info));
trick_play_video_stream_info->set_trick_play_factor(trick_play_factor); trick_play_video_stream_info->set_trick_play_factor(trick_play_factor);
@ -197,20 +198,20 @@ Status TrickPlayHandler::ProcessOneStreamData(
break; break;
} }
case StreamDataType::kMediaSample: { case StreamDataType::kMediaSample: {
if (stream_data->media_sample->is_key_frame()) { if (stream_data.media_sample->is_key_frame()) {
std::shared_ptr<MediaSample> trick_play_media_sample = std::shared_ptr<MediaSample> trick_play_media_sample =
MediaSample::CopyFrom(*(stream_data->media_sample)); MediaSample::CopyFrom(*(stream_data.media_sample));
trick_play_media_sample->set_duration(prev_sample_end_timestamp_ - trick_play_media_sample->set_duration(prev_sample_end_timestamp_ -
stream_data->media_sample->dts()); stream_data.media_sample->dts());
status = status =
DispatchMediaSample(output_stream_index, trick_play_media_sample); DispatchMediaSample(output_stream_index, trick_play_media_sample);
} }
break; break;
} }
default: default:
std::unique_ptr<StreamData> new_stream_data(new StreamData(*stream_data)); std::unique_ptr<StreamData> copy(new StreamData(stream_data));
new_stream_data->stream_index = output_stream_index; copy->stream_index = output_stream_index;
status = Dispatch(std::move(new_stream_data)); status = Dispatch(std::move(copy));
break; break;
} }
return status; return status;
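This handler is the motivating case for the const pointers: one StreamData is fanned out to several downstream handlers, so any per-output change (the stream_index here, or the trick-play sample duration above) has to be made on a fresh copy rather than on the shared object. A self-contained illustration of the copy-on-write idea using only standard-library types (Payload is a stand-in, not a packager class):

#include <cstdint>
#include <iostream>
#include <memory>
#include <vector>

struct Payload {
  int64_t timestamp = 0;
  int64_t duration = 0;
};

int main() {
  // One immutable payload shared by every "handler".
  auto shared = std::make_shared<const Payload>(Payload{90000, 3000});
  std::vector<std::shared_ptr<const Payload>> handlers = {shared, shared};

  // A handler that needs a different duration copies first (copy on write).
  auto copy = std::make_shared<Payload>(*handlers[0]);
  copy->duration = 1500;

  // The other handler still sees the original value: prints "3000 vs 1500".
  std::cout << handlers[1]->duration << " vs " << copy->duration << "\n";
  return 0;
}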
@ -56,8 +56,9 @@ class TrickPlayHandler : public MediaHandler {
// Decoding timestamp for current key media sample. It is used for calculating // Decoding timestamp for current key media sample. It is used for calculating
// the duration of previous key media sample, to make sure there is no gap // the duration of previous key media sample, to make sure there is no gap
// between two key media samples. // between two key media samples.
Status ProcessOneStreamData(size_t output_stream_index, Status ProcessOneStreamData(
const std::shared_ptr<StreamData>& stream_data); size_t output_stream_index,
const StreamData& stream_data);
// Trick play factors. Note that there can be multiple trick play factors, // Trick play factors. Note that there can be multiple trick play factors,
// e.g., 2, 4 and 8. That means, one input video stream will generate 3 // e.g., 2, 4 and 8. That means, one input video stream will generate 3
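The tests below build their inputs with the new StreamData factory helpers instead of filling the struct by hand. A sketch of the shape of such a call (kStreamIndex0, kTimeScale and GetVideoStreamInfo() are the test helpers used in the diff; ASSERT_OK comes from the test base):

// Sketch only: FromStreamInfo() pairs a stream index with a
// std::shared_ptr<const StreamInfo> and yields a dispatch-ready StreamData.
std::unique_ptr<StreamData> data =
    StreamData::FromStreamInfo(kStreamIndex0, GetVideoStreamInfo(kTimeScale));
ASSERT_OK(Process(std::move(data)));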
@ -88,8 +88,8 @@ TEST_F(TrickPlayHandlerTest, AudioStream) {
std::end(kTrickPlayFactors)); std::end(kTrickPlayFactors));
SetUpTrickPlayHandler(trick_play_factors); SetUpTrickPlayHandler(trick_play_factors);
Status status = Status status = Process(StreamData::FromStreamInfo(
Process(GetAudioStreamInfoStreamData(kStreamIndex0, kTimeScale)); kStreamIndex0, GetAudioStreamInfo(kTimeScale)));
Status kExpectStatus(error::TRICK_PLAY_ERROR, "Some Messages"); Status kExpectStatus(error::TRICK_PLAY_ERROR, "Some Messages");
EXPECT_TRUE(status.Matches(kExpectStatus)); EXPECT_TRUE(status.Matches(kExpectStatus));
} }
@ -101,7 +101,8 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithTrickPlay) {
std::end(kTrickPlayFactors)); std::end(kTrickPlayFactors));
SetUpTrickPlayHandler(trick_play_factors); SetUpTrickPlayHandler(trick_play_factors);
ASSERT_OK(Process(GetVideoStreamInfoStreamData(kStreamIndex0, kTimeScale))); ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale))));
// The stream info is cached, so the output is empty. // The stream info is cached, so the output is empty.
EXPECT_THAT( EXPECT_THAT(
GetOutputStreamDataVector(), GetOutputStreamDataVector(),
@ -113,9 +114,12 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithTrickPlay) {
const int kGOPSize = 3; const int kGOPSize = 3;
for (int i = 0; i < 3; ++i) { for (int i = 0; i < 3; ++i) {
const bool is_key_frame = (i % kGOPSize == 0); const bool is_key_frame = (i % kGOPSize == 0);
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + kDuration * i, kDuration, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration * i,
kDuration,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
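Each of the sample-feeding loops in this test (and the matching loops in the decreasing-factors test below) follows the same pattern; the new side of this hunk reconstructs to roughly:

    const int kGOPSize = 3;
    for (int i = 0; i < 3; ++i) {
      const bool is_key_frame = (i % kGOPSize == 0);
      ASSERT_OK(Process(StreamData::FromMediaSample(
          kStreamIndex0,
          GetMediaSample(kVideoStartTimestamp + kDuration * i,
                         kDuration,
                         is_key_frame))));
    }

Only the loop bounds change from hunk to hunk; the shape of the StreamData::FromMediaSample() call stays the same throughout.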
@@ -136,9 +140,12 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithTrickPlay) {
// ElementsAre supports at most 10 elements. // ElementsAre supports at most 10 elements.
for (int i = 3; i < 6; ++i) { for (int i = 3; i < 6; ++i) {
const bool is_key_frame = (i % kGOPSize == 0); const bool is_key_frame = (i % kGOPSize == 0);
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + kDuration * i, kDuration, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration * i,
kDuration,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
@@ -166,9 +173,12 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithTrickPlay) {
// ElementsAre supports at most 10 elements. // ElementsAre supports at most 10 elements.
for (int i = 6; i < 8; ++i) { for (int i = 6; i < 8; ++i) {
const bool is_key_frame = (i % kGOPSize == 0); const bool is_key_frame = (i % kGOPSize == 0);
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + kDuration * i, kDuration, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration * i,
kDuration,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
@@ -219,7 +229,8 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithDecreasingTrickPlayFactors) {
std::end(kTrickPlayFactorsDecreasing)); std::end(kTrickPlayFactorsDecreasing));
SetUpTrickPlayHandler(trick_play_factors); SetUpTrickPlayHandler(trick_play_factors);
ASSERT_OK(Process(GetVideoStreamInfoStreamData(kStreamIndex0, kTimeScale))); ASSERT_OK(Process(StreamData::FromStreamInfo(
kStreamIndex0, GetVideoStreamInfo(kTimeScale))));
// The stream info is cached, so the output is empty. // The stream info is cached, so the output is empty.
EXPECT_THAT( EXPECT_THAT(
GetOutputStreamDataVector(), GetOutputStreamDataVector(),
@@ -231,9 +242,12 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithDecreasingTrickPlayFactors) {
const int kGOPSize = 3; const int kGOPSize = 3;
for (int i = 0; i < 3; ++i) { for (int i = 0; i < 3; ++i) {
const bool is_key_frame = (i % kGOPSize == 0); const bool is_key_frame = (i % kGOPSize == 0);
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + kDuration * i, kDuration, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration * i,
kDuration,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
@@ -254,9 +268,12 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithDecreasingTrickPlayFactors) {
// ElementsAre supports at most 10 elements. // ElementsAre supports at most 10 elements.
for (int i = 3; i < 6; ++i) { for (int i = 3; i < 6; ++i) {
const bool is_key_frame = (i % kGOPSize == 0); const bool is_key_frame = (i % kGOPSize == 0);
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + kDuration * i, kDuration, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration * i,
kDuration,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(
@@ -285,9 +302,12 @@ TEST_F(TrickPlayHandlerTest, VideoStreamWithDecreasingTrickPlayFactors) {
// ElementsAre supports at most 10 elements. // ElementsAre supports at most 10 elements.
for (int i = 6; i < 8; ++i) { for (int i = 6; i < 8; ++i) {
const bool is_key_frame = (i % kGOPSize == 0); const bool is_key_frame = (i % kGOPSize == 0);
ASSERT_OK(Process(GetMediaSampleStreamData( ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0, kVideoStartTimestamp + kDuration * i, kDuration, kStreamIndex0,
is_key_frame))); GetMediaSample(
kVideoStartTimestamp + kDuration * i,
kDuration,
is_key_frame))));
} }
EXPECT_THAT( EXPECT_THAT(