Implement ChunkingHandler

This handler is a multi-in multi-out handler. If more than one input is
provided, there should be one and only one video stream; also, all inputs
should come from the same thread and are synchronized.
There can be multiple chunking handlers running in different threads or even
different processes. We use the "consistent chunking algorithm" to make sure
the chunks in different streams are aligned without explicitly communicating
with each other - which would be inefficient and often difficult.

Consistent Chunking Algorithm:
 1. Find the consistent chunkable boundary
 Let the timestamps for video frames be (t1, t2, t3, ...). Then a
 consistent chunkable boundary is simply the first chunkable boundary after
 (tk / N) != (tk-1 / N), where '/' denotes integer division, and N is the
 intended chunk duration.
 2. Chunk only at the consistent chunkable boundary

This algorithm will make sure the chunks from different video streams are
aligned if they have aligned GoPs. However, this algorithm will only work
for video streams. To be able to chunk non video streams at similar
positions as video streams, ChunkingHandler is designed to accept one video
input and multiple non video inputs, the non video inputs are chunked when
the video input is chunked. If the inputs are synchronized - which is true
if the inputs come from the same demuxer, the video and non video chunks
are aligned.

Change-Id: Id3bad51ab14f311efdb8713b6cd36d36cf9e4639
This commit is contained in:
Kongqun Yang 2017-02-07 10:58:47 -08:00 committed by KongQun Yang
parent 7a90ee70ab
commit 9990524f98
16 changed files with 973 additions and 119 deletions

View File

@ -118,6 +118,18 @@
'../../third_party/protobuf/protobuf.gyp:protobuf_lite',
],
},
{
'target_name': 'media_handler_test_base',
'type': '<(component)',
'sources': [
'media_handler_test_base.cc',
'media_handler_test_base.h',
],
'dependencies': [
'../../testing/gmock.gyp:gmock',
'../../testing/gtest.gyp:gtest',
],
},
{
'target_name': 'media_base_unittest',
'type': '<(gtest_target_type)',

View File

@ -35,14 +35,14 @@ struct MediaEvent {};
struct SegmentInfo {
bool is_subsegment = false;
bool is_encrypted = false;
uint64_t start_timestamp = 0;
uint64_t duration = 0;
int64_t start_timestamp = -1;
int64_t duration = 0;
};
// TODO(kqyang): Should we use protobuf?
struct StreamData {
int stream_index;
StreamDataType stream_data_type;
int stream_index = -1;
StreamDataType stream_data_type = StreamDataType::kUnknown;
std::unique_ptr<PeriodInfo> period_info;
std::unique_ptr<StreamInfo> stream_info;
@ -114,6 +114,7 @@ class MediaHandler {
std::unique_ptr<PeriodInfo> period_info) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kPeriodInfo;
stream_data->period_info = std::move(period_info);
return Dispatch(std::move(stream_data));
}
@ -123,6 +124,7 @@ class MediaHandler {
std::unique_ptr<StreamInfo> stream_info) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kStreamInfo;
stream_data->stream_info = std::move(stream_info);
return Dispatch(std::move(stream_data));
}
@ -133,6 +135,7 @@ class MediaHandler {
std::unique_ptr<EncryptionConfig> encryption_config) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kEncryptionConfig;
stream_data->encryption_config = std::move(encryption_config);
return Dispatch(std::move(stream_data));
}
@ -142,6 +145,7 @@ class MediaHandler {
std::unique_ptr<MediaSample> media_sample) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kMediaSample;
stream_data->media_sample = std::move(media_sample);
return Dispatch(std::move(stream_data));
}
@ -151,6 +155,7 @@ class MediaHandler {
std::unique_ptr<MediaEvent> media_event) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kMediaEvent;
stream_data->media_event = std::move(media_event);
return Dispatch(std::move(stream_data));
}
@ -160,6 +165,7 @@ class MediaHandler {
std::unique_ptr<SegmentInfo> segment_info) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = stream_index;
stream_data->stream_data_type = StreamDataType::kSegmentInfo;
stream_data->segment_info = std::move(segment_info);
return Dispatch(std::move(stream_data));
}

View File

@ -0,0 +1,154 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include "packager/media/base/media_handler_test_base.h"
#include "packager/media/base/audio_stream_info.h"
#include "packager/media/base/test/status_test_util.h"
#include "packager/media/base/video_stream_info.h"
namespace {
// Canned values shared by the mock audio/video stream infos built below.
const int kTrackId = 1;
const uint64_t kDuration = 10000;
const char kCodecString[] = "codec string";
const uint8_t kSampleBits = 1;
const uint8_t kNumChannels = 2;
const uint32_t kSamplingFrequency = 48000;
const uint64_t kSeekPrerollNs = 12345;
const uint64_t kCodecDelayNs = 56789;
const uint32_t kMaxBitrate = 13579;
const uint32_t kAvgBitrate = 13000;
const char kLanguage[] = "eng";
const uint16_t kWidth = 10u;
const uint16_t kHeight = 20u;
const uint32_t kPixelWidth = 2u;
const uint32_t kPixelHeight = 3u;
const int16_t kTrickPlayRate = 4;
const uint8_t kNaluLengthSize = 1u;
const bool kEncrypted = true;

// Use an H.264 codec config.
const uint8_t kCodecConfig[]{
    // Header
    0x01, 0x64, 0x00, 0x1e, 0xff,
    // SPS count (ignore top three bits)
    0xe1,
    // SPS
    0x00, 0x19,  // Size
    0x67, 0x64, 0x00, 0x1e, 0xac, 0xd9, 0x40, 0xa0, 0x2f, 0xf9, 0x70, 0x11,
    0x00, 0x00, 0x03, 0x03, 0xe9, 0x00, 0x00, 0xea, 0x60, 0x0f, 0x16, 0x2d,
    0x96,
    // PPS count
    0x01,
    // PPS
    0x00, 0x06,  // Size
    0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
};

// Mock sample payload; we don't really care about what is inside.
const uint8_t kData[]{
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
};
}  // namespace
namespace shaka {
namespace media {
// A fake media handler used for testing: it records every StreamData it
// receives instead of forwarding it, so tests can inspect a handler's output.
class FakeMediaHandler : public MediaHandler {
 public:
  // Returns all stream data received so far, in arrival order.
  const std::vector<std::unique_ptr<StreamData>>& stream_data_vector() const {
    return stream_data_vector_;
  }
  // Discards all recorded stream data.
  void clear_stream_data_vector() { stream_data_vector_.clear(); }

 protected:
  Status InitializeInternal() override { return Status::OK; }
  // Captures the incoming stream data; never fails.
  Status Process(std::unique_ptr<StreamData> stream_data) override {
    stream_data_vector_.push_back(std::move(stream_data));
    return Status::OK;
  }
  Status FlushStream(int input_stream_index) override { return Status::OK; }
  // Accepts any output stream index; the fake imposes no routing constraints.
  bool ValidateOutputStreamIndex(int stream_index) const override {
    return true;
  }

  std::vector<std::unique_ptr<StreamData>> stream_data_vector_;
};
// Creates the downstream capture handler plus a spare handler for tests that
// just need some MediaHandler instance.
MediaHandlerTestBase::MediaHandlerTestBase()
    : next_handler_(new FakeMediaHandler),
      some_handler_(new FakeMediaHandler) {}
// Builds a kStreamInfo StreamData carrying a mock stream info for |codec|
// with the given |time_scale|.
std::unique_ptr<StreamData> MediaHandlerTestBase::GetStreamInfoStreamData(
    int stream_index,
    Codec codec,
    uint32_t time_scale) {
  std::unique_ptr<StreamData> data(new StreamData);
  data->stream_index = stream_index;
  data->stream_data_type = StreamDataType::kStreamInfo;
  data->stream_info = GetMockStreamInfo(codec, time_scale);
  return data;
}
// Builds a kMediaSample StreamData wrapping the canned kData payload with the
// given timing; |is_keyframe| marks the sample as a key frame.
std::unique_ptr<StreamData> MediaHandlerTestBase::GetMediaSampleStreamData(
    int stream_index,
    int64_t timestamp,
    int64_t duration,
    bool is_keyframe) {
  std::unique_ptr<StreamData> data(new StreamData);
  data->stream_index = stream_index;
  data->stream_data_type = StreamDataType::kMediaSample;
  data->media_sample.reset(
      new MediaSample(kData, sizeof(kData), nullptr, 0, is_keyframe));
  data->media_sample->set_dts(timestamp);
  data->media_sample->set_duration(duration);
  return data;
}
// Wires |handler| into a test graph: a throwaway upstream handler feeds its
// |num_inputs| inputs, and all |num_outputs| outputs are routed to
// |next_handler_| so tests can inspect what |handler| dispatches.
void MediaHandlerTestBase::SetUpGraph(int num_inputs,
                                      int num_outputs,
                                      std::shared_ptr<MediaHandler> handler) {
  // Input handler is not really used anywhere but just to satisfy one input
  // one output restriction for the encryption handler.
  auto input_handler = std::make_shared<FakeMediaHandler>();
  for (int i = 0; i < num_inputs; ++i)
    ASSERT_OK(input_handler->SetHandler(i, handler));
  // All outputs are routed to |next_handler_|.
  for (int i = 0; i < num_outputs; ++i)
    ASSERT_OK(handler->SetHandler(i, next_handler_));
}
// Returns every StreamData captured by the downstream handler so far.
const std::vector<std::unique_ptr<StreamData>>&
MediaHandlerTestBase::GetOutputStreamDataVector() const {
  return next_handler_->stream_data_vector();
}

// Discards previously captured output so the next expectation starts fresh.
void MediaHandlerTestBase::ClearOutputStreamDataVector() {
  next_handler_->clear_stream_data_vector();
}
// Creates an AudioStreamInfo or VideoStreamInfo with canned test values
// depending on which codec range |codec| falls into; returns nullptr for a
// codec outside both the audio and video ranges.
std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetMockStreamInfo(
    Codec codec, uint32_t time_scale) {
  if (codec >= kCodecAudio && codec < kCodecAudioMaxPlusOne) {
    return std::unique_ptr<StreamInfo>(new AudioStreamInfo(
        kTrackId, time_scale, kDuration, codec, kCodecString, kCodecConfig,
        sizeof(kCodecConfig), kSampleBits, kNumChannels, kSamplingFrequency,
        kSeekPrerollNs, kCodecDelayNs, kMaxBitrate, kAvgBitrate, kLanguage,
        !kEncrypted));
  } else if (codec >= kCodecVideo && codec < kCodecVideoMaxPlusOne) {
    return std::unique_ptr<StreamInfo>(new VideoStreamInfo(
        kTrackId, time_scale, kDuration, codec, kCodecString, kCodecConfig,
        sizeof(kCodecConfig), kWidth, kHeight, kPixelWidth, kPixelHeight,
        kTrickPlayRate, kNaluLengthSize, kLanguage, !kEncrypted));
  }
  return nullptr;
}
} // namespace media
} // namespace shaka

View File

@ -0,0 +1,98 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "packager/media/base/media_handler.h"
namespace shaka {
namespace media {
class FakeMediaHandler;
// Matches a StreamData carrying stream info with the given stream index,
// time scale and encryption state.
MATCHER_P3(IsStreamInfo, stream_index, time_scale, encrypted, "") {
  return arg->stream_index == stream_index &&
         arg->stream_data_type == StreamDataType::kStreamInfo &&
         arg->stream_info->time_scale() == time_scale &&
         arg->stream_info->is_encrypted() == encrypted;
}

// Matches a StreamData carrying (sub)segment info with the given stream
// index, start timestamp, duration and subsegment flag.
MATCHER_P4(IsSegmentInfo, stream_index, timestamp, duration, subsegment, "") {
  return arg->stream_index == stream_index &&
         arg->stream_data_type == StreamDataType::kSegmentInfo &&
         arg->segment_info->start_timestamp == timestamp &&
         arg->segment_info->duration == duration &&
         arg->segment_info->is_subsegment == subsegment;
}

// Matches a StreamData carrying a media sample with the given stream index,
// dts and duration.
MATCHER_P3(IsMediaSample, stream_index, timestamp, duration, "") {
  return arg->stream_index == stream_index &&
         arg->stream_data_type == StreamDataType::kMediaSample &&
         arg->media_sample->dts() == timestamp &&
         arg->media_sample->duration() == duration;
}
// Base test fixture: provides factories for StreamData test inputs and
// captures a handler's dispatched output in a fake downstream handler so
// tests can inspect it.
class MediaHandlerTestBase : public ::testing::Test {
 public:
  MediaHandlerTestBase();

  /// Return a stream data with mock stream info.
  std::unique_ptr<StreamData> GetStreamInfoStreamData(int stream_index,
                                                      Codec codec,
                                                      uint32_t time_scale);

  /// Return a stream data with mock video stream info.
  std::unique_ptr<StreamData> GetVideoStreamInfoStreamData(
      int stream_index,
      uint32_t time_scale) {
    return GetStreamInfoStreamData(stream_index, kCodecVP9, time_scale);
  }

  /// Return a stream data with mock audio stream info.
  std::unique_ptr<StreamData> GetAudioStreamInfoStreamData(
      int stream_index,
      uint32_t time_scale) {
    return GetStreamInfoStreamData(stream_index, kCodecAAC, time_scale);
  }

  /// Return a stream data with mock media sample.
  std::unique_ptr<StreamData> GetMediaSampleStreamData(int stream_index,
                                                       int64_t timestamp,
                                                       int64_t duration,
                                                       bool is_keyframe);

  /// Setup a graph using |handler| with |num_inputs| and |num_outputs|.
  void SetUpGraph(int num_inputs,
                  int num_outputs,
                  std::shared_ptr<MediaHandler> handler);

  /// Return the output stream data vector from handler.
  const std::vector<std::unique_ptr<StreamData>>& GetOutputStreamDataVector()
      const;

  /// Clear the output stream data vector.
  void ClearOutputStreamDataVector();

  /// @return some random handler that can be used for testing.
  std::shared_ptr<MediaHandler> some_handler() { return some_handler_; }

 private:
  MediaHandlerTestBase(const MediaHandlerTestBase&) = delete;
  MediaHandlerTestBase& operator=(const MediaHandlerTestBase&) = delete;

  // Get a mock stream info for testing.
  std::unique_ptr<StreamInfo> GetMockStreamInfo(Codec codec,
                                                uint32_t time_scale);

  // Downstream handler used in testing graph; records all dispatched output.
  std::shared_ptr<FakeMediaHandler> next_handler_;
  // Some random handler which can be used for testing.
  std::shared_ptr<MediaHandler> some_handler_;
};
} // namespace media
} // namespace shaka

View File

@ -34,6 +34,10 @@ std::string ErrorCodeToString(Code error_code) {
return "HTTP_FAILURE";
case PARSER_FAILURE:
return "PARSER_FAILURE";
case ENCRYPTION_FAILURE:
return "ENCRYPTION_FAILURE";
case CHUNKING_ERROR:
return "CHUNKING_ERROR";
case MUXER_FAILURE:
return "MUXER_FAILURE";
case FRAGMENT_FINALIZED:
@ -46,6 +50,10 @@ std::string ErrorCodeToString(Code error_code) {
return "STOPPED";
case TIME_OUT:
return "TIME_OUT";
case NOT_FOUND:
return "NOT_FOUND";
case ALREADY_EXISTS:
return "ALREADY_EXISTS";
default:
NOTIMPLEMENTED() << "Unknown Status Code: " << error_code;
return "UNKNOWN_STATUS";

View File

@ -51,6 +51,9 @@ enum Code {
// Failed to do the encryption.
ENCRYPTION_FAILURE,
// Error when trying to do chunking.
CHUNKING_ERROR,
// Fail to mux the media file.
MUXER_FAILURE,

View File

@ -22,7 +22,9 @@ enum StreamType {
enum Codec {
kUnknownCodec = 0,
kCodecH264,
kCodecVideo = 100,
kCodecH264 = kCodecVideo,
kCodecHEV1,
kCodecHVC1,
kCodecVC1,
@ -32,7 +34,10 @@ enum Codec {
kCodecVP8,
kCodecVP9,
kCodecVP10,
kCodecAAC,
kCodecVideoMaxPlusOne,
kCodecAudio = 200,
kCodecAAC = kCodecAudio,
kCodecAC3,
kCodecDTSC,
kCodecDTSE,
@ -43,8 +48,9 @@ enum Codec {
kCodecEAC3,
kCodecOpus,
kCodecVorbis,
kCodecText,
kNumCodec
kCodecAudioMaxPlusOne,
kCodecText = 300,
};
/// Abstract class holds stream information.

View File

@ -0,0 +1,39 @@
# Copyright 2017 Google Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
{
  'includes': [
    '../../common.gypi',
  ],
  'targets': [
    {
      # Library implementing the (sub)segment chunking media handler.
      'target_name': 'chunking',
      'type': '<(component)',
      'sources': [
        'chunking_handler.cc',
        'chunking_handler.h',
      ],
      'dependencies': [
        '../base/media_base.gyp:media_base',
      ],
    },
    {
      # Unit tests for the chunking handler.
      'target_name': 'chunking_unittest',
      'type': '<(gtest_target_type)',
      'sources': [
        'chunking_handler_unittest.cc',
      ],
      'dependencies': [
        '../../testing/gtest.gyp:gtest',
        '../../testing/gmock.gyp:gmock',
        '../base/media_base.gyp:media_handler_test_base',
        '../test/media_test.gyp:media_test_support',
        'chunking',
      ]
    },
  ],
}

View File

@ -0,0 +1,252 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include "packager/media/chunking/chunking_handler.h"
#include "packager/base/logging.h"
#include "packager/base/threading/platform_thread.h"
#include "packager/media/base/media_sample.h"
namespace {
// File-scope constants; declared const (the originals were mutable globals).
// Sentinel: no input thread has registered with the handler yet.
const int64_t kThreadIdUnset = -1;
// Threshold value that makes DispatchNonMainSamples flush every cached
// sample regardless of its timestamp.
const int64_t kTimeStampToDispatchAllSamples = -1;
}  // namespace
namespace shaka {
namespace media {
// Constructs the handler with a copy of |chunking_options|. A zero segment
// duration is a configuration error and is rejected with a CHECK.
ChunkingHandler::ChunkingHandler(const ChunkingOptions& chunking_options)
    : chunking_options_(chunking_options), thread_id_(kThreadIdUnset) {
  // |segment_duration_in_seconds| is a double; compare against 0.0 instead of
  // the unsigned literal 0u to avoid a mixed sign/float comparison.
  CHECK_NE(chunking_options.segment_duration_in_seconds, 0.0);
}

ChunkingHandler::~ChunkingHandler() {}
// Sizes all per-stream bookkeeping vectors to the number of input streams.
Status ChunkingHandler::InitializeInternal() {
  const size_t stream_count = num_input_streams();
  segment_info_.resize(stream_count);
  subsegment_info_.resize(stream_count);
  time_scales_.resize(stream_count);
  last_sample_end_timestamps_.resize(stream_count);
  return Status::OK;
}
// Routes one piece of stream data through the chunking logic:
//  - kStreamInfo: validates the same-thread and single-video constraints and
//    records the stream's time scale; the first video stream (or the only
//    stream) becomes the main stream that drives chunking.
//  - kSegmentInfo: dropped; this handler generates its own segment info.
//  - kMediaSample: main-stream samples drive chunking via ProcessMediaSample;
//    non-main samples are cached until a main-stream sample determines which
//    segment they belong to.
// Data not consumed above is passed through to the downstream handler.
Status ChunkingHandler::Process(std::unique_ptr<StreamData> stream_data) {
  switch (stream_data->stream_data_type) {
    case StreamDataType::kStreamInfo: {
      // Make sure the inputs come from the same thread.
      const int64_t thread_id =
          static_cast<int64_t>(base::PlatformThread::CurrentId());
      int64_t expected = kThreadIdUnset;
      // compare_exchange_strong either claims the unset slot for this thread
      // or leaves |expected| holding the previously registered thread id.
      if (!thread_id_.compare_exchange_strong(expected, thread_id) &&
          expected != thread_id) {
        return Status(error::CHUNKING_ERROR,
                      "Inputs should come from the same thread.");
      }
      const auto time_scale = stream_data->stream_info->time_scale();
      // The video stream is treated as the main stream. If there is only one
      // stream, it is the main stream.
      const bool is_main_stream =
          main_stream_index_ == -1 &&
          (stream_data->stream_info->stream_type() == kStreamVideo ||
           num_input_streams() == 1);
      if (is_main_stream) {
        main_stream_index_ = stream_data->stream_index;
        // Convert configured durations (seconds) into the main stream's time
        // scale.
        segment_duration_ =
            chunking_options_.segment_duration_in_seconds * time_scale;
        subsegment_duration_ =
            chunking_options_.subsegment_duration_in_seconds * time_scale;
      } else if (stream_data->stream_info->stream_type() == kStreamVideo) {
        return Status(error::CHUNKING_ERROR,
                      "Only one video stream is allowed per chunking handler.");
      }
      time_scales_[stream_data->stream_index] = time_scale;
      break;
    }
    case StreamDataType::kSegmentInfo:
      VLOG(3) << "Drop existing segment info.";
      return Status::OK;
    case StreamDataType::kMediaSample: {
      const int stream_index = stream_data->stream_index;
      DCHECK_NE(time_scales_[stream_index], 0u)
          << "kStreamInfo should arrive before kMediaSample";
      if (stream_index != main_stream_index_) {
        if (!stream_data->media_sample->is_key_frame()) {
          return Status(error::CHUNKING_ERROR,
                        "All non video samples should be key frames.");
        }
        // Cache non main stream samples, since we don't know yet whether these
        // samples belong to the current or next segment.
        non_main_samples_.push_back(std::move(stream_data));
        // The streams are expected to be synchronized, so we don't expect to
        // see a lot of samples before seeing video samples.
        const size_t kMaxSamplesPerStreamBeforeVideoSample = 5u;
        if (non_main_samples_.size() >
            num_input_streams() * kMaxSamplesPerStreamBeforeVideoSample) {
          return Status(error::CHUNKING_ERROR,
                        "Too many non video samples before video sample.");
        }
        return Status::OK;
      }
      const MediaSample* sample = stream_data->media_sample.get();
      Status status = ProcessMediaSample(sample);
      if (!status.ok())
        return status;
      // Discard samples before segment start.
      if (!segment_info_[stream_index])
        return Status::OK;
      last_sample_end_timestamps_[stream_index] =
          sample->dts() + sample->duration();
      break;
    }
    default:
      VLOG(3) << "Stream data type "
              << static_cast<int>(stream_data->stream_data_type) << " ignored.";
      break;
  }
  return Dispatch(std::move(stream_data));
}
// Flushes one input stream: dispatches any samples still cached for non-main
// streams, emits the final (partial) segment info for the stream, then
// delegates to the base class to flush the corresponding output.
Status ChunkingHandler::FlushStream(int input_stream_index) {
  if (segment_info_[input_stream_index]) {
    Status status;
    if (input_stream_index != main_stream_index_) {
      // Flush all remaining cached samples regardless of timestamp.
      status = DispatchNonMainSamples(kTimeStampToDispatchAllSamples);
      if (!status.ok())
        return status;
    }
    auto& segment_info = segment_info_[input_stream_index];
    // start_timestamp == -1 means no sample was dispatched for this segment,
    // so there is nothing to finalize.
    if (segment_info->start_timestamp != -1) {
      segment_info->duration = last_sample_end_timestamps_[input_stream_index] -
                               segment_info->start_timestamp;
      status = DispatchSegmentInfo(input_stream_index, std::move(segment_info));
      if (!status.ok())
        return status;
    }
  }
  return MediaHandler::FlushStream(input_stream_index);
}
// Applies the consistent chunking algorithm to one main-stream sample:
// decides whether a new segment and/or subsegment starts at |sample|, then
// flushes the cached non-main samples and the finished (sub)segment infos
// accordingly.
Status ChunkingHandler::ProcessMediaSample(const MediaSample* sample) {
  const bool is_key_frame = sample->is_key_frame();
  const int64_t timestamp = sample->dts();
  // Check if we need to terminate the current (sub)segment.
  bool new_segment = false;
  bool new_subsegment = false;
  if (is_key_frame || !chunking_options_.segment_sap_aligned) {
    // Consistent chunking: a boundary is crossed when the integer segment
    // index (timestamp / segment_duration_) changes.
    const int64_t segment_index = timestamp / segment_duration_;
    if (segment_index != current_segment_index_) {
      current_segment_index_ = segment_index;
      new_segment = true;
    }
  }
  if (!new_segment && subsegment_duration_ > 0 &&
      (is_key_frame || !chunking_options_.subsegment_sap_aligned)) {
    // Subsegment indices are relative to the current segment's start.
    const int64_t subsegment_index =
        (timestamp - segment_info_[main_stream_index_]->start_timestamp) /
        subsegment_duration_;
    if (subsegment_index != current_subsegment_index_) {
      current_subsegment_index_ = subsegment_index;
      new_subsegment = true;
    }
  }
  Status status;
  if (new_segment || new_subsegment) {
    // Dispatch the samples before |timestamp| - see the implementation of
    // DispatchNonMainSamples on how we determine if a sample is before
    // |timestamp|.
    status.Update(DispatchNonMainSamples(timestamp));
  }
  if (new_segment) {
    status.Update(DispatchSegmentInfoForAllStreams());
    segment_info_[main_stream_index_]->start_timestamp = timestamp;
  }
  if (subsegment_duration_ > 0 && (new_segment || new_subsegment)) {
    status.Update(DispatchSubsegmentInfoForAllStreams());
    subsegment_info_[main_stream_index_]->start_timestamp = timestamp;
  }
  if (!status.ok())
    return status;
  // Dispatch non-main samples for the next segment.
  return DispatchNonMainSamples(kTimeStampToDispatchAllSamples);
}
// Dispatches cached non-main samples whose midpoint falls no later than
// |timestamp_threshold| (expressed in the main stream's time scale), or all
// cached samples when |timestamp_threshold| is kTimeStampToDispatchAllSamples.
// Samples arriving before the first segment has started are discarded.
Status ChunkingHandler::DispatchNonMainSamples(int64_t timestamp_threshold) {
  Status status;
  while (status.ok() && !non_main_samples_.empty()) {
    DCHECK_EQ(non_main_samples_.front()->stream_data_type,
              StreamDataType::kMediaSample);
    const int stream_index = non_main_samples_.front()->stream_index;
    const MediaSample* sample = non_main_samples_.front()->media_sample.get();
    // If the portion of the sample before |timestamp_threshold| is bigger than
    // the other portion, we consider it part of the current segment.
    const int64_t timestamp = sample->dts() + sample->duration() / 2;
    // Compare in seconds since the two streams may use different time scales.
    const bool stop =
        (timestamp_threshold != kTimeStampToDispatchAllSamples &&
         (static_cast<double>(timestamp) / time_scales_[stream_index]) >
             (static_cast<double>(timestamp_threshold) /
              time_scales_[main_stream_index_]));
    VLOG(3) << "Sample ts: " << sample->dts() << " "
            << " duration: " << sample->duration()
            << " scale: " << time_scales_[stream_index] << "\n"
            << " threshold: " << timestamp_threshold
            << " scale: " << time_scales_[main_stream_index_]
            << (stop ? " stop "
                     : (segment_info_[stream_index] ? " dispatch "
                                                    : " discard "));
    if (stop)
      break;
    // Only dispatch samples if the segment has started, otherwise discard
    // them.
    if (segment_info_[stream_index]) {
      // The first dispatched sample establishes the (sub)segment start.
      if (segment_info_[stream_index]->start_timestamp == -1)
        segment_info_[stream_index]->start_timestamp = sample->dts();
      if (subsegment_info_[stream_index] &&
          subsegment_info_[stream_index]->start_timestamp == -1) {
        subsegment_info_[stream_index]->start_timestamp = sample->dts();
      }
      last_sample_end_timestamps_[stream_index] =
          sample->dts() + sample->duration();
      status.Update(Dispatch(std::move(non_main_samples_.front())));
    }
    non_main_samples_.pop_front();
  }
  return status;
}
// Finalizes and dispatches the segment info of every stream that dispatched
// at least one sample, then starts a fresh segment (and clears any pending
// subsegment) for each stream. Stops early on the first dispatch failure.
Status ChunkingHandler::DispatchSegmentInfoForAllStreams() {
  Status result;
  for (size_t stream = 0; stream < segment_info_.size() && result.ok();
       ++stream) {
    auto& info = segment_info_[stream];
    if (info && info->start_timestamp != -1) {
      info->duration =
          last_sample_end_timestamps_[stream] - info->start_timestamp;
      result.Update(DispatchSegmentInfo(stream, std::move(info)));
    }
    segment_info_[stream].reset(new SegmentInfo);
    subsegment_info_[stream].reset();
  }
  return result;
}
// Finalizes and dispatches the subsegment info of every stream that
// dispatched at least one sample, then starts a fresh subsegment for each
// stream. Stops early on the first dispatch failure.
Status ChunkingHandler::DispatchSubsegmentInfoForAllStreams() {
  Status result;
  for (size_t stream = 0; stream < subsegment_info_.size() && result.ok();
       ++stream) {
    auto& info = subsegment_info_[stream];
    if (info && info->start_timestamp != -1) {
      info->duration =
          last_sample_end_timestamps_[stream] - info->start_timestamp;
      result.Update(DispatchSegmentInfo(stream, std::move(info)));
    }
    subsegment_info_[stream].reset(new SegmentInfo);
    subsegment_info_[stream]->is_subsegment = true;
  }
  return result;
}
} // namespace media
} // namespace shaka

View File

@ -0,0 +1,123 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#ifndef PACKAGER_MEDIA_CHUNKING_CHUNKING_HANDLER_
#define PACKAGER_MEDIA_CHUNKING_CHUNKING_HANDLER_
#include <atomic>
#include <deque>

#include "packager/media/base/media_handler.h"
namespace shaka {
namespace media {
struct ChunkingOptions {
  /// Segment duration in seconds. Must be non-zero (checked at handler
  /// construction).
  double segment_duration_in_seconds = 0;
  /// Subsegment duration in seconds. Should not be larger than the segment
  /// duration. A value of 0 disables subsegment generation.
  double subsegment_duration_in_seconds = 0;
  /// Force segments to begin with stream access points. Actual segment duration
  /// may not be exactly what is specified by segment_duration.
  bool segment_sap_aligned = true;
  /// Force subsegments to begin with stream access points. Actual subsegment
  /// duration may not be exactly what is specified by subsegment_duration.
  /// Setting to true implies that segment_sap_aligned is true as well.
  bool subsegment_sap_aligned = true;
};
/// ChunkingHandler splits the samples into segments / subsegments based on the
/// specified chunking options.
/// This handler is a multi-in multi-out handler. If more than one input is
/// provided, there should be one and only one video stream; also, all inputs
/// should come from the same thread and are synchronized.
/// There can be multiple chunking handlers running in different threads or even
/// different processes; we use the "consistent chunking algorithm" to make sure
/// the chunks in different streams are aligned without explicitly communicating
/// with each other - which is not efficient and often difficult.
///
/// Consistent Chunking Algorithm:
///  1. Find the consistent chunkable boundary
///  Let the timestamps for video frames be (t1, t2, t3, ...). Then a
///  consistent chunkable boundary is simply the first chunkable boundary after
///  (tk / N) != (tk-1 / N), where '/' denotes integer division, and N is the
///  intended chunk duration.
///  2. Chunk only at the consistent chunkable boundary
///
/// This algorithm will make sure the chunks from different video streams are
/// aligned if they have aligned GoPs. However, this algorithm will only work
/// for video streams. To be able to chunk non video streams at similar
/// positions as video streams, ChunkingHandler is designed to accept one video
/// input and multiple non video inputs; the non video inputs are chunked when
/// the video input is chunked. If the inputs are synchronized - which is true
/// if the inputs come from the same demuxer - the video and non video chunks
/// are aligned.
class ChunkingHandler : public MediaHandler {
 public:
  explicit ChunkingHandler(const ChunkingOptions& chunking_options);
  ~ChunkingHandler() override;

 protected:
  /// @name MediaHandler implementation overrides.
  /// @{
  Status InitializeInternal() override;
  Status Process(std::unique_ptr<StreamData> stream_data) override;
  Status FlushStream(int input_stream_index) override;
  /// @}

 private:
  friend class ChunkingHandlerTest;

  ChunkingHandler(const ChunkingHandler&) = delete;
  ChunkingHandler& operator=(const ChunkingHandler&) = delete;

  // Processes a main-stream media sample and applies chunking if needed.
  Status ProcessMediaSample(const MediaSample* sample);

  // Dispatch cached non main stream samples before |timestamp_threshold|.
  Status DispatchNonMainSamples(int64_t timestamp_threshold);

  // The (sub)segments are aligned and dispatched together.
  Status DispatchSegmentInfoForAllStreams();
  Status DispatchSubsegmentInfoForAllStreams();

  const ChunkingOptions chunking_options_;

  // The inputs are expected to come from the same thread.
  std::atomic<int64_t> thread_id_;
  // The video stream is the main stream; if there is only one stream, it is the
  // main stream. The chunking is based on the main stream.
  int main_stream_index_ = -1;
  // Segment and subsegment duration in main stream's time scale.
  int64_t segment_duration_ = 0;
  int64_t subsegment_duration_ = 0;

  // The streams are expected to be synchronized. Cache non main (video) stream
  // samples so we can determine whether the next segment should include these
  // samples. The samples will be dispatched after seeing the next main stream
  // sample.
  std::deque<std::unique_ptr<StreamData>> non_main_samples_;

  // Current segment index, useful to determine where to do chunking.
  int64_t current_segment_index_ = -1;
  // Current subsegment index, useful to determine where to do chunking.
  int64_t current_subsegment_index_ = -1;

  // Per-stream pending (sub)segment info; null until the stream's first
  // segment starts.
  std::vector<std::unique_ptr<SegmentInfo>> segment_info_;
  std::vector<std::unique_ptr<SegmentInfo>> subsegment_info_;
  std::vector<uint32_t> time_scales_;
  // The end timestamp of the last dispatched sample, per stream.
  std::vector<int64_t> last_sample_end_timestamps_;
};
} // namespace media
} // namespace shaka
#endif // PACKAGER_MEDIA_CHUNKING_CHUNKING_HANDLER_

View File

@ -0,0 +1,237 @@
// Copyright 2017 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include "packager/media/chunking/chunking_handler.h"
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "packager/media/base/media_handler_test_base.h"
#include "packager/media/base/test/status_test_util.h"
using ::testing::ElementsAre;
using ::testing::IsEmpty;
namespace shaka {
namespace media {
namespace {
const int kStreamIndex0 = 0;
const int kStreamIndex1 = 1;
// Two different time scales to exercise cross-stream timestamp comparison.
const uint32_t kTimeScale0 = 800;
const uint32_t kTimeScale1 = 1000;
const int64_t kDuration0 = 200;
const int64_t kDuration1 = 300;
// Named booleans so call sites read as kKeyFrame / !kKeyFrame etc.
const bool kKeyFrame = true;
const bool kIsSubsegment = true;
const bool kEncrypted = true;
}  // namespace
// Fixture that owns a ChunkingHandler wired into the test graph and exposes
// its Process() / FlushStream() entry points directly.
class ChunkingHandlerTest : public MediaHandlerTestBase {
 public:
  // Creates a handler with |chunking_options| and |num_inputs| inputs (one
  // output per input), then initializes it.
  void SetUpChunkingHandler(int num_inputs,
                            const ChunkingOptions& chunking_options) {
    chunking_handler_.reset(new ChunkingHandler(chunking_options));
    SetUpGraph(num_inputs, num_inputs, chunking_handler_);
    ASSERT_OK(chunking_handler_->Initialize());
  }

  Status Process(std::unique_ptr<StreamData> stream_data) {
    return chunking_handler_->Process(std::move(stream_data));
  }

  Status FlushStream(int stream_index) {
    return chunking_handler_->FlushStream(stream_index);
  }

 protected:
  std::shared_ptr<ChunkingHandler> chunking_handler_;
};
// Single audio stream, 1s segments, no subsegments: the segment boundary is
// emitted when the sample's segment index changes, and flushing emits the
// final partial segment.
TEST_F(ChunkingHandlerTest, AudioNoSubsegmentsThenFlush) {
  ChunkingOptions chunking_options;
  chunking_options.segment_duration_in_seconds = 1;
  SetUpChunkingHandler(1, chunking_options);

  ASSERT_OK(Process(GetAudioStreamInfoStreamData(kStreamIndex0, kTimeScale0)));
  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted)));
  for (int i = 0; i < 5; ++i) {
    ClearOutputStreamDataVector();
    ASSERT_OK(Process(GetMediaSampleStreamData(kStreamIndex0, i * kDuration1,
                                               kDuration1, kKeyFrame)));
    // One output stream_data except when i == 3, which also has SegmentInfo.
    if (i == 3) {
      EXPECT_THAT(
          GetOutputStreamDataVector(),
          ElementsAre(
              IsSegmentInfo(kStreamIndex0, 0, kDuration1 * 3, !kIsSubsegment),
              IsMediaSample(kStreamIndex0, i * kDuration1, kDuration1)));
    } else {
      EXPECT_THAT(GetOutputStreamDataVector(),
                  ElementsAre(IsMediaSample(kStreamIndex0, i * kDuration1,
                                            kDuration1)));
    }
  }
  ClearOutputStreamDataVector();
  ASSERT_OK(FlushStream(kStreamIndex0));
  EXPECT_THAT(GetOutputStreamDataVector(),
              ElementsAre(IsSegmentInfo(kStreamIndex0, kDuration1 * 3,
                                        kDuration1 * 2, !kIsSubsegment)));
}
// Single video stream with subsegments and a nonzero start timestamp:
// (sub)segments only terminate at key frames (SAP aligned), and samples
// before the first key frame are discarded.
TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
  ChunkingOptions chunking_options;
  chunking_options.segment_duration_in_seconds = 1;
  chunking_options.subsegment_duration_in_seconds = 0.3;
  SetUpChunkingHandler(1, chunking_options);

  ASSERT_OK(Process(GetVideoStreamInfoStreamData(kStreamIndex0, kTimeScale1)));
  const int64_t kVideoStartTimestamp = 12345;
  for (int i = 0; i < 6; ++i) {
    // Alternate key frame.
    const bool is_key_frame = (i % 2) == 1;
    ASSERT_OK(Process(GetMediaSampleStreamData(
        kStreamIndex0, kVideoStartTimestamp + i * kDuration1, kDuration1,
        is_key_frame)));
  }
  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(
          IsStreamInfo(kStreamIndex0, kTimeScale1, !kEncrypted),
          // The first sample @ kVideoStartTimestamp is discarded - not a key
          // frame.
          IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1,
                        kDuration1),
          IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,
                        kDuration1),
          // The next segment boundary 13245 / 1000 != 12645 / 1000.
          IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp + kDuration1,
                        kDuration1 * 2, !kIsSubsegment),
          IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 3,
                        kDuration1),
          IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 4,
                        kDuration1),
          // The subsegment has duration kDuration1 * 2 since it can only
          // terminate before a key frame.
          IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 3,
                        kDuration1 * 2, kIsSubsegment),
          IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 5,
                        kDuration1)));
}
// Verifies that when an audio stream and a video stream go through the same
// ChunkingHandler, the audio segments/subsegments are terminated at the same
// positions as the video stream's, so the chunks of both streams stay aligned.
TEST_F(ChunkingHandlerTest, AudioAndVideo) {
  ChunkingOptions chunking_options;
  chunking_options.segment_duration_in_seconds = 1;
  chunking_options.subsegment_duration_in_seconds = 0.3;
  SetUpChunkingHandler(2, chunking_options);
  ASSERT_OK(Process(GetAudioStreamInfoStreamData(kStreamIndex0, kTimeScale0)));
  ASSERT_OK(Process(GetVideoStreamInfoStreamData(kStreamIndex1, kTimeScale1)));
  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(IsStreamInfo(kStreamIndex0, kTimeScale0, !kEncrypted),
                  IsStreamInfo(kStreamIndex1, kTimeScale1, !kEncrypted)));
  ClearOutputStreamDataVector();
  // Equivalent to 12345 in video timescale.
  const int64_t kAudioStartTimestamp = 9876;
  const int64_t kVideoStartTimestamp = 12345;
  // Interleave audio and video samples as a synchronized demuxer would.
  for (int i = 0; i < 5; ++i) {
    ASSERT_OK(Process(GetMediaSampleStreamData(
        kStreamIndex0, kAudioStartTimestamp + kDuration0 * i, kDuration0,
        true)));
    // Alternate key frame.
    const bool is_key_frame = (i % 2) == 1;
    ASSERT_OK(Process(GetMediaSampleStreamData(
        kStreamIndex1, kVideoStartTimestamp + kDuration1 * i, kDuration1,
        is_key_frame)));
  }
  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(
          // The first samples @ the start timestamps are discarded - the
          // first video sample is not a key frame.
          IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0,
                        kDuration0),
          IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1,
                        kDuration1),
          IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 2,
                        kDuration0),
          IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 2,
                        kDuration1),
          IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 3,
                        kDuration0),
          // The audio segment is terminated together with the video stream.
          IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0,
                        kDuration0 * 3, !kIsSubsegment),
          // The next segment boundary 13245 / 1000 != 12645 / 1000.
          IsSegmentInfo(kStreamIndex1, kVideoStartTimestamp + kDuration1,
                        kDuration1 * 2, !kIsSubsegment),
          IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
                        kDuration1),
          IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
                        kDuration0),
          IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 4,
                        kDuration1)));
  ClearOutputStreamDataVector();
  // The side comments below show the equivalent timestamp in video timescale.
  // The audio and video are made ~aligned.
  ASSERT_OK(Process(GetMediaSampleStreamData(kStreamIndex0,
                                             kAudioStartTimestamp + kDuration0 * 5,
                                             kDuration0, true)));  // 13595
  ASSERT_OK(Process(GetMediaSampleStreamData(kStreamIndex1,
                                             kVideoStartTimestamp + kDuration1 * 5,
                                             kDuration1, true)));  // 13845
  ASSERT_OK(Process(GetMediaSampleStreamData(kStreamIndex0,
                                             kAudioStartTimestamp + kDuration0 * 6,
                                             kDuration0, true)));  // 13845
  // These expectations are separated from the ones above because ElementsAre
  // supports at most 10 elements.
  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(
          IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 5,
                        kDuration0),
          // Audio is terminated along with video below.
          IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
                        kDuration0 * 2, kIsSubsegment),
          // The subsegment has duration kDuration1 * 2 since it can only
          // terminate before a key frame.
          IsSegmentInfo(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 3,
                        kDuration1 * 2, kIsSubsegment),
          IsMediaSample(kStreamIndex1, kVideoStartTimestamp + kDuration1 * 5,
                        kDuration1)));
  ClearOutputStreamDataVector();
  // Flushing the audio stream emits its buffered sample and terminates the
  // final (partial) audio segment.
  ASSERT_OK(FlushStream(kStreamIndex0));
  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(
          IsMediaSample(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 6,
                        kDuration0),
          IsSegmentInfo(kStreamIndex0, kAudioStartTimestamp + kDuration0 * 4,
                        kDuration0 * 3, !kIsSubsegment)));
  ClearOutputStreamDataVector();
  // Flushing the video stream terminates its final (partial) segment.
  ASSERT_OK(FlushStream(kStreamIndex1));
  EXPECT_THAT(GetOutputStreamDataVector(),
              ElementsAre(IsSegmentInfo(kStreamIndex1,
                                        kVideoStartTimestamp + kDuration1 * 3,
                                        kDuration1 * 3, !kIsSubsegment)));
  // Flushing again will do nothing.
  ClearOutputStreamDataVector();
  ASSERT_OK(FlushStream(kStreamIndex0));
  ASSERT_OK(FlushStream(kStreamIndex1));
  EXPECT_THAT(GetOutputStreamDataVector(), IsEmpty());
}
} // namespace media
} // namespace shaka

View File

@ -30,6 +30,7 @@
'dependencies': [
'../../testing/gtest.gyp:gtest',
'../../testing/gmock.gyp:gmock',
'../base/media_base.gyp:media_handler_test_base',
'../test/media_test.gyp:media_test_support',
'crypto',
]

View File

@ -14,6 +14,7 @@
#include "packager/media/base/aes_encryptor.h"
#include "packager/media/base/aes_pattern_cryptor.h"
#include "packager/media/base/key_source.h"
#include "packager/media/base/media_sample.h"
#include "packager/media/base/video_stream_info.h"
#include "packager/media/codecs/video_slice_header_parser.h"
#include "packager/media/codecs/vp8_parser.h"
@ -104,11 +105,13 @@ Status EncryptionHandler::Process(std::unique_ptr<StreamData> stream_data) {
status = ProcessStreamInfo(stream_data->stream_info.get());
break;
case StreamDataType::kSegmentInfo:
if (!stream_data->segment_info->is_subsegment) {
new_segment_ = true;
if (remaining_clear_lead_ > 0)
remaining_clear_lead_ -= stream_data->segment_info->duration;
else
stream_data->segment_info->is_encrypted = true;
}
break;
case StreamDataType::kMediaSample:
status = ProcessMediaSample(stream_data->media_sample.get());

View File

@ -9,7 +9,6 @@
#include "packager/media/base/key_source.h"
#include "packager/media/base/media_handler.h"
#include "packager/media/base/stream_info.h"
namespace shaka {
namespace media {
@ -66,7 +65,7 @@ class EncryptionHandler : public MediaHandler {
// Processes |stream_info| and sets up stream specific variables.
Status ProcessStreamInfo(StreamInfo* stream_info);
// Processes media sample end encrypts it if needed.
// Processes media sample and encrypts it if needed.
Status ProcessMediaSample(MediaSample* sample);
bool CreateEncryptor(EncryptionKey* encryption_key);
@ -81,7 +80,7 @@ class EncryptionHandler : public MediaHandler {
void InjectVideoSliceHeaderParserForTesting(
std::unique_ptr<VideoSliceHeaderParser> header_parser);
EncryptionOptions encryption_options_;
const EncryptionOptions encryption_options_;
KeySource* key_source_ = nullptr;
KeySource::TrackType track_type_ = KeySource::TRACK_TYPE_UNKNOWN;
std::unique_ptr<AesCryptor> encryptor_;

View File

@ -11,10 +11,9 @@
#include "packager/media/base/aes_decryptor.h"
#include "packager/media/base/aes_pattern_cryptor.h"
#include "packager/media/base/audio_stream_info.h"
#include "packager/media/base/fixed_key_source.h"
#include "packager/media/base/media_handler_test_base.h"
#include "packager/media/base/test/status_test_util.h"
#include "packager/media/base/video_stream_info.h"
#include "packager/media/codecs/video_slice_header_parser.h"
#include "packager/media/codecs/vpx_parser.h"
@ -40,26 +39,6 @@ class MockKeySource : public FixedKeySource {
EncryptionKey* key));
};
class FakeMediaHandler : public MediaHandler {
public:
const std::vector<std::unique_ptr<StreamData>>& stream_data_vector() const {
return stream_data_vector_;
}
void clear_stream_data_vector() { stream_data_vector_.clear(); }
protected:
Status InitializeInternal() override { return Status::OK; }
Status Process(std::unique_ptr<StreamData> stream_data) override {
stream_data_vector_.push_back(std::move(stream_data));
return Status::OK;
}
bool ValidateOutputStreamIndex(int stream_index) const override {
return stream_index == 0;
}
std::vector<std::unique_ptr<StreamData>> stream_data_vector_;
};
class MockVpxParser : public VPxParser {
public:
MOCK_METHOD3(Parse,
@ -77,20 +56,14 @@ class MockVideoSliceHeaderParser : public VideoSliceHeaderParser {
} // namespace
class EncryptionHandlerTest : public ::testing::Test {
class EncryptionHandlerTest : public MediaHandlerTestBase {
public:
void SetUp() override { SetUpEncryptionHandler(EncryptionOptions()); }
void SetUpEncryptionHandler(const EncryptionOptions& encryption_options) {
encryption_handler_.reset(
new EncryptionHandler(encryption_options, &mock_key_source_));
next_handler_.reset(new FakeMediaHandler);
// Input handler is not really used anywhere but just to satisfy one input
// one output restriction for the encryption handler.
auto input_handler = std::make_shared<FakeMediaHandler>();
ASSERT_OK(input_handler->AddHandler(encryption_handler_));
ASSERT_OK(encryption_handler_->AddHandler(next_handler_));
SetUpGraph(1 /* one input */, 1 /* one output */, encryption_handler_);
}
Status Process(std::unique_ptr<StreamData> stream_data) {
@ -109,7 +82,6 @@ class EncryptionHandlerTest : public ::testing::Test {
protected:
std::shared_ptr<EncryptionHandler> encryption_handler_;
std::shared_ptr<FakeMediaHandler> next_handler_;
MockKeySource mock_key_source_;
};
@ -118,61 +90,26 @@ TEST_F(EncryptionHandlerTest, Initialize) {
}
TEST_F(EncryptionHandlerTest, OnlyOneOutput) {
auto another_handler = std::make_shared<FakeMediaHandler>();
// Connecting another handler will fail.
ASSERT_EQ(error::INVALID_ARGUMENT,
encryption_handler_->AddHandler(another_handler).error_code());
encryption_handler_->AddHandler(some_handler()).error_code());
}
TEST_F(EncryptionHandlerTest, OnlyOneInput) {
auto another_handler = std::make_shared<FakeMediaHandler>();
ASSERT_OK(another_handler->AddHandler(encryption_handler_));
ASSERT_OK(some_handler()->AddHandler(encryption_handler_));
ASSERT_EQ(error::INVALID_ARGUMENT,
encryption_handler_->Initialize().error_code());
}
namespace {
const int kTrackId = 1;
const uint32_t kTimeScale = 1000;
const uint64_t kDuration = 10000;
const char kCodecString[] = "codec string";
const uint8_t kSampleBits = 1;
const uint8_t kNumChannels = 2;
const uint32_t kSamplingFrequency = 48000;
const uint64_t kSeekPrerollNs = 12345;
const uint64_t kCodecDelayNs = 56789;
const uint32_t kMaxBitrate = 13579;
const uint32_t kAvgBitrate = 13000;
const char kLanguage[] = "eng";
const uint16_t kWidth = 10u;
const uint16_t kHeight = 20u;
const uint32_t kPixelWidth = 2u;
const uint32_t kPixelHeight = 3u;
const int16_t kTrickPlayRate = 4;
const uint8_t kNaluLengthSize = 1u;
const int kStreamIndex = 0;
const bool kEncrypted = true;
const uint32_t kTimeScale = 1000;
const uint32_t kMaxSdPixels = 100u;
const uint32_t kMaxHdPixels = 200u;
const uint32_t kMaxUhd1Pixels = 300u;
// Use H264 code config.
const uint8_t kCodecConfig[]{
// Header
0x01, 0x64, 0x00, 0x1e, 0xff,
// SPS count (ignore top three bits)
0xe1,
// SPS
0x00, 0x19, // Size
0x67, 0x64, 0x00, 0x1e, 0xac, 0xd9, 0x40, 0xa0, 0x2f, 0xf9, 0x70, 0x11,
0x00, 0x00, 0x03, 0x03, 0xe9, 0x00, 0x00, 0xea, 0x60, 0x0f, 0x16, 0x2d,
0x96,
// PPS count
0x01,
// PPS
0x00, 0x06, // Size
0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
};
// The data is based on H264. The same data is also used to test audio, which
// does not care about the underlying data, and VP9, for which we will mock the
// parser.
@ -222,22 +159,6 @@ class EncryptionHandlerEncryptionTest
SetUpEncryptionHandler(encryption_options);
}
std::unique_ptr<StreamInfo> GetMockStreamInfo() {
if (codec_ == kCodecAAC) {
return std::unique_ptr<StreamInfo>(new AudioStreamInfo(
kTrackId, kTimeScale, kDuration, codec_, kCodecString, kCodecConfig,
sizeof(kCodecConfig), kSampleBits, kNumChannels, kSamplingFrequency,
kSeekPrerollNs, kCodecDelayNs, kMaxBitrate, kAvgBitrate, kLanguage,
!kEncrypted));
} else {
return std::unique_ptr<StreamInfo>(new VideoStreamInfo(
kTrackId, kTimeScale, kDuration, codec_, kCodecString, kCodecConfig,
sizeof(kCodecConfig), kWidth, kHeight, kPixelWidth, kPixelHeight,
kTrickPlayRate, kNaluLengthSize, kLanguage, !kEncrypted));
}
}
std::vector<VPxFrameInfo> GetMockVpxFrameInfo() {
std::vector<VPxFrameInfo> vpx_frames;
vpx_frames.resize(2);
@ -373,17 +294,9 @@ class EncryptionHandlerEncryptionTest
};
TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = 0;
stream_data->stream_data_type = StreamDataType::kStreamInfo;
stream_data->stream_info = GetMockStreamInfo();
ASSERT_OK(Process(std::move(stream_data)));
ASSERT_EQ(1u, next_handler_->stream_data_vector().size());
ASSERT_EQ(0, next_handler_->stream_data_vector().back()->stream_index);
ASSERT_EQ(StreamDataType::kStreamInfo,
next_handler_->stream_data_vector().back()->stream_data_type);
ASSERT_TRUE(
next_handler_->stream_data_vector().back()->stream_info->is_encrypted());
ASSERT_OK(Process(GetStreamInfoStreamData(kStreamIndex, codec_, kTimeScale)));
EXPECT_THAT(GetOutputStreamDataVector(),
ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted)));
// Inject vpx parser / video slice header parser if needed.
switch (codec_) {
@ -410,7 +323,7 @@ TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
break;
}
stream_data.reset(new StreamData);
std::unique_ptr<StreamData> stream_data(new StreamData);
stream_data->stream_index = 0;
stream_data->stream_data_type = StreamDataType::kMediaSample;
stream_data->media_sample.reset(
@ -420,13 +333,12 @@ TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
.WillOnce(
DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK)));
ASSERT_OK(Process(std::move(stream_data)));
ASSERT_EQ(2u, next_handler_->stream_data_vector().size());
ASSERT_EQ(0, next_handler_->stream_data_vector().back()->stream_index);
ASSERT_EQ(2u, GetOutputStreamDataVector().size());
ASSERT_EQ(0, GetOutputStreamDataVector().back()->stream_index);
ASSERT_EQ(StreamDataType::kMediaSample,
next_handler_->stream_data_vector().back()->stream_data_type);
GetOutputStreamDataVector().back()->stream_data_type);
auto* media_sample =
next_handler_->stream_data_vector().back()->media_sample.get();
auto* media_sample = GetOutputStreamDataVector().back()->media_sample.get();
auto* decrypt_config = media_sample->decrypt_config();
EXPECT_EQ(std::vector<uint8_t>(kKeyId, kKeyId + sizeof(kKeyId)),
decrypt_config->key_id());

View File

@ -115,6 +115,7 @@
'dependencies': [
'hls/hls.gyp:hls_unittest',
'media/base/media_base.gyp:media_base_unittest',
'media/chunking/chunking.gyp:chunking_unittest',
'media/codecs/codecs.gyp:codecs_unittest',
'media/crypto/crypto.gyp:crypto_unittest',
'media/event/media_event.gyp:media_event_unittest',