// Copyright 2017 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

#include "packager/media/crypto/encryption_handler.h"

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "packager/media/base/aes_decryptor.h"
#include "packager/media/base/aes_pattern_cryptor.h"
#include "packager/media/base/media_handler_test_base.h"
#include "packager/media/base/raw_key_source.h"
#include "packager/media/codecs/video_slice_header_parser.h"
#include "packager/media/codecs/vpx_parser.h"
#include "packager/status_test_util.h"

namespace shaka {
namespace media {
namespace {

using ::testing::_;
using ::testing::Combine;
using ::testing::DoAll;
using ::testing::ElementsAre;
using ::testing::Mock;
using ::testing::Return;
using ::testing::SetArgPointee;
using ::testing::StrictMock;
using ::testing::Values;
using ::testing::ValuesIn;
using ::testing::WithParamInterface;

const char kAudioStreamLabel[] = "AUDIO";
const char kSdVideoStreamLabel[] = "SD";

const uint8_t kKeyId[]{
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
};
const uint8_t kKey[]{
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
};
const uint8_t kIv[]{
    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
    0x08, 0x09, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15,
};

// The default KID for key rotation is all 0s.
const uint8_t kKeyRotationDefaultKeyId[] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
};

class MockKeySource : public RawKeySource {
 public:
  MOCK_METHOD2(GetKey,
               Status(const std::string& stream_label, EncryptionKey* key));
  MOCK_METHOD3(GetCryptoPeriodKey,
               Status(uint32_t crypto_period_index,
                      const std::string& stream_label,
                      EncryptionKey* key));
};

class MockVpxParser : public VPxParser {
 public:
  MOCK_METHOD3(Parse,
               bool(const uint8_t* data,
                    size_t data_size,
                    std::vector<VPxFrameInfo>* vpx_frames));
};

class MockVideoSliceHeaderParser : public VideoSliceHeaderParser {
 public:
  MOCK_METHOD1(Initialize,
               bool(const std::vector<uint8_t>& decoder_configuration));
  MOCK_METHOD1(GetHeaderSize, int64_t(const Nalu& nalu));
};

}  // namespace

class EncryptionHandlerTest : public MediaHandlerGraphTestBase {
 public:
  void SetUp() override { SetUpEncryptionHandler(EncryptionParams()); }

  void SetUpEncryptionHandler(const EncryptionParams& encryption_params) {
    EncryptionParams new_encryption_params = encryption_params;
    if (!encryption_params.stream_label_func) {
      // Set up the default stream label function.
      new_encryption_params.stream_label_func =
          [](const EncryptionParams::EncryptedStreamAttributes&
                 stream_attributes) {
            if (stream_attributes.stream_type ==
                EncryptionParams::EncryptedStreamAttributes::kAudio) {
              return kAudioStreamLabel;
            }
            return kSdVideoStreamLabel;
          };
    }
    encryption_handler_.reset(
        new EncryptionHandler(new_encryption_params, &mock_key_source_));
    SetUpGraph(1 /* one input */, 1 /* one output */, encryption_handler_);
  }

  Status Process(std::unique_ptr<StreamData> stream_data) {
    return encryption_handler_->Process(std::move(stream_data));
  }

  EncryptionKey GetMockEncryptionKey() {
    EncryptionKey encryption_key;
    encryption_key.key_id.assign(kKeyId, kKeyId + sizeof(kKeyId));
    encryption_key.key.assign(kKey, kKey + sizeof(kKey));
    encryption_key.iv.assign(kIv, kIv + sizeof(kIv));
    return encryption_key;
  }

  void InjectVpxParserForTesting(std::unique_ptr<VPxParser> vpx_parser) {
    encryption_handler_->InjectVpxParserForTesting(std::move(vpx_parser));
  }

  void InjectVideoSliceHeaderParserForTesting(
      std::unique_ptr<VideoSliceHeaderParser> header_parser) {
    encryption_handler_->InjectVideoSliceHeaderParserForTesting(
        std::move(header_parser));
  }

 protected:
  std::shared_ptr<EncryptionHandler> encryption_handler_;
  StrictMock<MockKeySource> mock_key_source_;
};

TEST_F(EncryptionHandlerTest, Initialize) {
  ASSERT_OK(encryption_handler_->Initialize());
}

TEST_F(EncryptionHandlerTest, OnlyOneOutput) {
  // Connecting another handler will fail.
  ASSERT_OK(encryption_handler_->AddHandler(some_handler()));
  ASSERT_EQ(error::INVALID_ARGUMENT,
            encryption_handler_->Initialize().error_code());
}

TEST_F(EncryptionHandlerTest, OnlyOneInput) {
  ASSERT_OK(some_handler()->AddHandler(encryption_handler_));
  ASSERT_EQ(error::INVALID_ARGUMENT,
            encryption_handler_->Initialize().error_code());
}

namespace {

const bool kVp9SubsampleEncryption = true;
const bool kIsKeyFrame = true;
const bool kIsSubsegment = true;
const bool kEncrypted = true;
const size_t kStreamIndex = 0;
const uint32_t kTimeScale = 1000;
const int64_t kSampleDuration = 1000;
const int64_t kSegmentDuration = 1000;

// The data is based on H264. The same data is also used to test audio, which
// does not care about the underlying data, and VP9, for which we will mock the
// parser.
const uint8_t kData[]{
    // First NALU
    0x30, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21,
    0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, 0x32, 0x33,
    0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
    0x46,
    // Second NALU
    0x31, 0x25, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09,
    0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x20, 0x21,
    0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x30, 0x31, 0x32, 0x33,
    0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45,
    0x46, 0x47,
    // Third non-video-slice NALU for H264 or superframe index for VP9.
    0x06, 0x67, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e,
};
const size_t kDataSize = sizeof(kData);
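// Note: the three NALUs above are length-prefixed with a 1-byte length field
// (0x30 = 48, 0x31 = 49 and 0x06 = 6 payload bytes), i.e. 49, 50 and 7 bytes
// in total, matching kSubsampleSize1/2/3 below.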

// A short data size (less than leading clear bytes) for SampleAes audio
// testing.
const size_t kShortDataSize = 14;

// H264 subsample information for the above data.
const size_t kNaluLengthSize = 1u;
const size_t kNaluHeaderSize = 1u;
const size_t kSubsampleSize1 = 49u;
const size_t kSliceHeaderSize1 = 1u;
const size_t kSubsampleSize2 = 50u;
const size_t kSliceHeaderSize2 = 16u;
const size_t kSubsampleSize3 = 7u;
// VP9 frame information for the above data. It should match H264 subsample
// information.
const size_t kVpxFrameSize1 = kSubsampleSize1;
const size_t kUncompressedHeaderSize1 =
    kNaluLengthSize + kNaluHeaderSize + kSliceHeaderSize1;
const size_t kVpxFrameSize2 = kSubsampleSize2;
const size_t kUncompressedHeaderSize2 =
    kNaluLengthSize + kNaluHeaderSize + kSliceHeaderSize2;
// Subsample pairs for the above data.
const size_t kClearSize1 = kUncompressedHeaderSize1;
const size_t kCipherSize1 = kVpxFrameSize1 - kUncompressedHeaderSize1;
const size_t kClearSize2 = kUncompressedHeaderSize2;
const size_t kCipherSize2 = kVpxFrameSize2 - kUncompressedHeaderSize2;
// Align cipher bytes for some protection schemes.
const size_t kAesBlockSize = 16u;
const size_t kAlignedClearSize1 = kClearSize1 + kCipherSize1 % kAesBlockSize;
static_assert(kAlignedClearSize1 != kClearSize1,
              "Clearsize 1 should not be aligned");
const size_t kAlignedCipherSize1 = kCipherSize1 - kCipherSize1 % kAesBlockSize;
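// Worked example for the constants above: subsample 1 has
// kClearSize1 = 1 + 1 + 1 = 3 clear bytes and kCipherSize1 = 49 - 3 = 46
// cipher bytes. For schemes that require block alignment, the 46 % 16 = 14
// trailing cipher bytes move into the clear part, giving
// kAlignedClearSize1 = 3 + 14 = 17 and kAlignedCipherSize1 = 46 - 14 = 32.
// Subsample 2 (18 clear / 32 cipher bytes) is already block aligned.
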
// Apple Sample AES.
const size_t kVideoLeadingClearBytesSize = 32u + kNaluLengthSize;
// Subsample 1 is <= 48 bytes, so not encrypted and merged with subsample2.
const size_t kSampleAesClearSize1 =
    kSubsampleSize1 + kVideoLeadingClearBytesSize;
const size_t kSampleAesCipherSize1 =
    kSubsampleSize2 - kVideoLeadingClearBytesSize;
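// Worked example for SAMPLE-AES: the leading clear region is 32 + 1 = 33
// bytes, so the merged first subsample covers 49 + 33 = 82 clear bytes and
// 50 - 33 = 17 cipher bytes of the two video NALUs.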

}  // namespace

inline bool operator==(const SubsampleEntry& lhs, const SubsampleEntry& rhs) {
  return lhs.clear_bytes == rhs.clear_bytes &&
         lhs.cipher_bytes == rhs.cipher_bytes;
}

class EncryptionHandlerEncryptionTest
    : public EncryptionHandlerTest,
      public WithParamInterface<std::tr1::tuple<FourCC, Codec, bool>> {
 public:
  void SetUp() override {
    protection_scheme_ = std::tr1::get<0>(GetParam());
    codec_ = std::tr1::get<1>(GetParam());
    vp9_subsample_encryption_ = std::tr1::get<2>(GetParam());
  }

  std::vector<VPxFrameInfo> GetMockVpxFrameInfo() {
    std::vector<VPxFrameInfo> vpx_frames;
    vpx_frames.resize(2);
    vpx_frames[0].frame_size = kVpxFrameSize1;
    vpx_frames[0].uncompressed_header_size = kUncompressedHeaderSize1;
    vpx_frames[1].frame_size = kVpxFrameSize2;
    vpx_frames[1].uncompressed_header_size = kUncompressedHeaderSize2;
    return vpx_frames;
  }

  // The subsample values should match |GetMockVpxFrameInfo| above.
  std::vector<SubsampleEntry> GetExpectedSubsamples() {
    std::vector<SubsampleEntry> subsamples;
    if (codec_ == kCodecAAC ||
        (codec_ == kCodecVP9 && !vp9_subsample_encryption_)) {
      return subsamples;
    }
    if (protection_scheme_ == kAppleSampleAesProtectionScheme) {
      subsamples.emplace_back(static_cast<uint16_t>(kSampleAesClearSize1),
                              static_cast<uint32_t>(kSampleAesCipherSize1));
      subsamples.emplace_back(static_cast<uint16_t>(kSubsampleSize3), 0u);
    } else {
      if (codec_ == kCodecVP9 || protection_scheme_ == FOURCC_cbc1 ||
          protection_scheme_ == FOURCC_cens ||
          protection_scheme_ == FOURCC_cenc) {
        // Align the encrypted bytes to a multiple of 16 bytes.
        subsamples.emplace_back(static_cast<uint16_t>(kAlignedClearSize1),
                                static_cast<uint32_t>(kAlignedCipherSize1));
        // Subsample 2 is already aligned.
      } else {
        subsamples.emplace_back(static_cast<uint16_t>(kClearSize1),
                                static_cast<uint32_t>(kCipherSize1));
      }
      subsamples.emplace_back(static_cast<uint16_t>(kClearSize2),
                              static_cast<uint32_t>(kCipherSize2));
      subsamples.emplace_back(static_cast<uint16_t>(kSubsampleSize3), 0u);
    }
    return subsamples;
  }

  // Inject vpx parser / video slice header parser if needed.
  void InjectCodecParser() {
    switch (codec_) {
      case kCodecVP9:
        if (vp9_subsample_encryption_) {
          std::unique_ptr<MockVpxParser> mock_vpx_parser(new MockVpxParser);
          EXPECT_CALL(*mock_vpx_parser, Parse(_, kDataSize, _))
              .WillRepeatedly(
                  DoAll(SetArgPointee<2>(GetMockVpxFrameInfo()), Return(true)));
          InjectVpxParserForTesting(std::move(mock_vpx_parser));
        }
        break;
      case kCodecH264: {
        std::unique_ptr<MockVideoSliceHeaderParser> mock_header_parser(
            new MockVideoSliceHeaderParser);
        if (protection_scheme_ == kAppleSampleAesProtectionScheme) {
          EXPECT_CALL(*mock_header_parser, GetHeaderSize(_)).Times(0);
        } else {
          EXPECT_CALL(*mock_header_parser, GetHeaderSize(_))
              .WillOnce(Return(kSliceHeaderSize1))
              .WillOnce(Return(kSliceHeaderSize2))
              .WillRepeatedly(Return(kSliceHeaderSize2));
        }
        InjectVideoSliceHeaderParserForTesting(std::move(mock_header_parser));
        break;
      }
      default:
        break;
    }
  }
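
  // Decrypts |data| of |data_size| bytes in place, using kKey and the IV and
  // subsamples carried in |decrypt_config|. The cipher mirrors the scheme
  // under test: AES-CTR for 'cenc', AES-CBC for 'cbc1', pattern-based CTR/CBC
  // for 'cens'/'cbcs', and CBC with a constant IV for SAMPLE-AES. Returns
  // true on success.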
  bool Decrypt(const DecryptConfig& decrypt_config,
               uint8_t* data,
               size_t data_size) {
    size_t leading_clear_bytes_size = 0;
    std::unique_ptr<AesCryptor> aes_decryptor;
    switch (decrypt_config.protection_scheme()) {
      case FOURCC_cenc:
        aes_decryptor.reset(new AesCtrDecryptor);
        break;
      case FOURCC_cbc1:
        aes_decryptor.reset(new AesCbcDecryptor(kNoPadding));
        break;
      case FOURCC_cens:
        aes_decryptor.reset(new AesPatternCryptor(
            decrypt_config.crypt_byte_block(), decrypt_config.skip_byte_block(),
            AesPatternCryptor::kEncryptIfCryptByteBlockRemaining,
            AesCryptor::kDontUseConstantIv,
            std::unique_ptr<AesCryptor>(new AesCtrDecryptor())));
        break;
      case FOURCC_cbcs:
        aes_decryptor.reset(new AesPatternCryptor(
            decrypt_config.crypt_byte_block(), decrypt_config.skip_byte_block(),
            AesPatternCryptor::kEncryptIfCryptByteBlockRemaining,
            AesCryptor::kUseConstantIv,
            std::unique_ptr<AesCryptor>(new AesCbcDecryptor(kNoPadding))));
        break;
      case kAppleSampleAesProtectionScheme:
        if (decrypt_config.crypt_byte_block() == 0 &&
            decrypt_config.skip_byte_block() == 0) {
          const size_t kAudioLeadingClearBytesSize = 16u;
          // Only needed for audio; for video, it is already taken into
          // consideration in subsamples.
          leading_clear_bytes_size = kAudioLeadingClearBytesSize;
          aes_decryptor.reset(
              new AesCbcDecryptor(kNoPadding, AesCryptor::kUseConstantIv));
        } else {
          aes_decryptor.reset(new AesPatternCryptor(
              decrypt_config.crypt_byte_block(),
              decrypt_config.skip_byte_block(),
              AesPatternCryptor::kSkipIfCryptByteBlockRemaining,
              AesCryptor::kUseConstantIv,
              std::unique_ptr<AesCryptor>(new AesCbcDecryptor(kNoPadding))));
        }
        break;
      default:
        LOG(FATAL) << "Not supposed to happen.";
    }

    if (!aes_decryptor->InitializeWithIv(
            std::vector<uint8_t>(kKey, kKey + sizeof(kKey)),
            decrypt_config.iv())) {
      return false;
    }

    if (decrypt_config.subsamples().empty()) {
      // Sample not encrypted using subsample encryption. Decrypt whole.
      if (!aes_decryptor->Crypt(data + leading_clear_bytes_size,
                                data_size - leading_clear_bytes_size,
                                data + leading_clear_bytes_size)) {
        LOG(ERROR) << "Error during bulk sample decryption.";
        return false;
      }
      return true;
    }

    // Subsample decryption.
    const std::vector<SubsampleEntry>& subsamples = decrypt_config.subsamples();
    uint8_t* current_ptr = data;
    const uint8_t* const buffer_end = data + data_size;
    for (const auto& subsample : subsamples) {
      if (current_ptr + subsample.clear_bytes + subsample.cipher_bytes >
          buffer_end) {
        LOG(ERROR) << "Subsamples overflow sample buffer.";
        return false;
      }
      current_ptr += subsample.clear_bytes;
      if (!aes_decryptor->Crypt(current_ptr, subsample.cipher_bytes,
                                current_ptr)) {
        LOG(ERROR) << "Error decrypting subsample buffer.";
        return false;
      }
      current_ptr += subsample.cipher_bytes;
    }
    return true;
  }

  uint8_t GetExpectedCryptByteBlock() {
    if (protection_scheme_ == kAppleSampleAesProtectionScheme) {
      // Audio is whole-sample encrypted. We could not use a crypt_byte_block
      // of 1 for audio: if there is one crypto block remaining, it need not be
      // encrypted for video, but it needs to be encrypted for audio.
      return codec_ == kCodecAAC ? 0 : 1;
    }
    switch (protection_scheme_) {
      case FOURCC_cenc:
      case FOURCC_cbc1:
        return 0;
      case FOURCC_cens:
      case FOURCC_cbcs:
        return 1;
      default:
        return 0;
    }
  }

  uint8_t GetExpectedSkipByteBlock() {
    // Always use full sample encryption for audio.
    if (codec_ == kCodecAAC)
      return 0;
    switch (protection_scheme_) {
      case FOURCC_cenc:
      case FOURCC_cbc1:
        return 0;
      case FOURCC_cens:
      case FOURCC_cbcs:
      case kAppleSampleAesProtectionScheme:
        return 9;
      default:
        return 0;
    }
  }

  uint8_t GetExpectedPerSampleIvSize() {
    switch (protection_scheme_) {
      case FOURCC_cenc:
      case FOURCC_cens:
      case FOURCC_cbc1:
        return sizeof(kIv);
      case FOURCC_cbcs:
      case kAppleSampleAesProtectionScheme:
        return 0;
      default:
        return 0;
    }
  }

  std::vector<uint8_t> GetExpectedConstantIv() {
    switch (protection_scheme_) {
      case FOURCC_cbcs:
      case kAppleSampleAesProtectionScheme:
        return std::vector<uint8_t>(std::begin(kIv), std::end(kIv));
      default:
        return std::vector<uint8_t>();
    }
  }

 protected:
  FourCC protection_scheme_;
  Codec codec_;
  bool vp9_subsample_encryption_;
};
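
// With kClearLeadInSeconds = 1.5 * kSegmentDuration / kTimeScale = 1.5 seconds
// and one-second segments, the first two segments are expected to remain in
// the clear; encryption starts from the third segment.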
TEST_P(EncryptionHandlerEncryptionTest, ClearLeadWithNoKeyRotation) {
  const double kClearLeadInSeconds = 1.5 * kSegmentDuration / kTimeScale;
  EncryptionParams encryption_params;
  encryption_params.protection_scheme = protection_scheme_;
  encryption_params.clear_lead_in_seconds = kClearLeadInSeconds;
  encryption_params.vp9_subsample_encryption = vp9_subsample_encryption_;
  SetUpEncryptionHandler(encryption_params);

  const EncryptionKey mock_encryption_key = GetMockEncryptionKey();
  EXPECT_CALL(mock_key_source_, GetKey(_, _))
      .WillOnce(
          DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK)));

  if (IsVideoCodec(codec_)) {
    ASSERT_OK(Process(StreamData::FromStreamInfo(
        kStreamIndex, GetVideoStreamInfo(kTimeScale, codec_))));
  } else {
    ASSERT_OK(Process(StreamData::FromStreamInfo(
        kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));
  }

  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted, _)));
  const StreamInfo* stream_info =
      GetOutputStreamDataVector().back()->stream_info.get();
  ASSERT_TRUE(stream_info);
  EXPECT_TRUE(stream_info->has_clear_lead());
  EXPECT_THAT(stream_info->encryption_config(),
              MatchEncryptionConfig(
                  protection_scheme_, GetExpectedCryptByteBlock(),
                  GetExpectedSkipByteBlock(), GetExpectedPerSampleIvSize(),
                  GetExpectedConstantIv(), mock_encryption_key.key_id));
  ClearOutputStreamDataVector();
  Mock::VerifyAndClearExpectations(&mock_key_source_);

  InjectCodecParser();

  // There are three segments. Only the third segment is encrypted.
  for (int i = 0; i < 3; ++i) {
    // Use single-frame segment for testing.
    ASSERT_OK(Process(StreamData::FromMediaSample(
        kStreamIndex, GetMediaSample(i * kSegmentDuration, kSegmentDuration,
                                     kIsKeyFrame, kData, kDataSize))));
    ASSERT_OK(Process(StreamData::FromSegmentInfo(
        kStreamIndex, GetSegmentInfo(i * kSegmentDuration, kSegmentDuration,
                                     !kIsSubsegment))));
    const bool is_encrypted = i == 2;
    const auto& output_stream_data = GetOutputStreamDataVector();
    EXPECT_THAT(output_stream_data,
                ElementsAre(IsMediaSample(kStreamIndex, i * kSegmentDuration,
                                          kSegmentDuration, is_encrypted, _),
                            IsSegmentInfo(kStreamIndex, i * kSegmentDuration,
                                          kSegmentDuration, !kIsSubsegment,
                                          is_encrypted)));
    EXPECT_FALSE(output_stream_data.back()
                     ->segment_info->key_rotation_encryption_config);
    ClearOutputStreamDataVector();
  }
}
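
// As above, the 1.5-second clear lead keeps the first two one-second segments
// clear. A new crypto period starts every kSegmentsPerCryptoPeriod segments,
// so GetCryptoPeriodKey() is expected once for each of segments 0, 2 and 4,
// with crypto period index i / kSegmentsPerCryptoPeriod.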
TEST_P(EncryptionHandlerEncryptionTest, ClearLeadWithKeyRotation) {
  const double kClearLeadInSeconds = 1.5 * kSegmentDuration / kTimeScale;
  const int kSegmentsPerCryptoPeriod = 2;  // 2 segments.
  const double kCryptoPeriodDurationInSeconds =
      kSegmentsPerCryptoPeriod * kSegmentDuration / kTimeScale;
  EncryptionParams encryption_params;
  encryption_params.protection_scheme = protection_scheme_;
  encryption_params.clear_lead_in_seconds = kClearLeadInSeconds;
  encryption_params.crypto_period_duration_in_seconds =
      kCryptoPeriodDurationInSeconds;
  encryption_params.vp9_subsample_encryption = vp9_subsample_encryption_;
  SetUpEncryptionHandler(encryption_params);

  if (IsVideoCodec(codec_)) {
    ASSERT_OK(Process(StreamData::FromStreamInfo(
        kStreamIndex, GetVideoStreamInfo(kTimeScale, codec_))));
  } else {
    ASSERT_OK(Process(StreamData::FromStreamInfo(
        kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));
  }

  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted, _)));
  const StreamInfo* stream_info =
      GetOutputStreamDataVector().back()->stream_info.get();
  ASSERT_TRUE(stream_info);
  EXPECT_TRUE(stream_info->has_clear_lead());
  const EncryptionConfig& encryption_config = stream_info->encryption_config();
  EXPECT_EQ(protection_scheme_, encryption_config.protection_scheme);
  EXPECT_EQ(GetExpectedCryptByteBlock(), encryption_config.crypt_byte_block);
  EXPECT_EQ(GetExpectedSkipByteBlock(), encryption_config.skip_byte_block);
  EXPECT_EQ(std::vector<uint8_t>(std::begin(kKeyRotationDefaultKeyId),
                                 std::end(kKeyRotationDefaultKeyId)),
            encryption_config.key_id);
  ClearOutputStreamDataVector();

  InjectCodecParser();

  // There are five segments with the first two not encrypted.
  for (int i = 0; i < 5; ++i) {
    if ((i % kSegmentsPerCryptoPeriod) == 0) {
      EXPECT_CALL(mock_key_source_,
                  GetCryptoPeriodKey(i / kSegmentsPerCryptoPeriod, _, _))
          .WillOnce(DoAll(SetArgPointee<2>(GetMockEncryptionKey()),
                          Return(Status::OK)));
    }
    // Use single-frame segment for testing.
    ASSERT_OK(Process(StreamData::FromMediaSample(
        kStreamIndex, GetMediaSample(i * kSegmentDuration, kSegmentDuration,
                                     kIsKeyFrame, kData, kDataSize))));
    ASSERT_OK(Process(StreamData::FromSegmentInfo(
        kStreamIndex, GetSegmentInfo(i * kSegmentDuration, kSegmentDuration,
                                     !kIsSubsegment))));
    const bool is_encrypted = i >= 2;
    const auto& output_stream_data = GetOutputStreamDataVector();
    EXPECT_THAT(output_stream_data,
                ElementsAre(IsMediaSample(kStreamIndex, i * kSegmentDuration,
                                          kSegmentDuration, is_encrypted, _),
                            IsSegmentInfo(kStreamIndex, i * kSegmentDuration,
                                          kSegmentDuration, !kIsSubsegment,
                                          is_encrypted)));
    EXPECT_THAT(*output_stream_data.back()
                     ->segment_info->key_rotation_encryption_config,
                MatchEncryptionConfig(
                    protection_scheme_, GetExpectedCryptByteBlock(),
                    GetExpectedSkipByteBlock(), GetExpectedPerSampleIvSize(),
                    GetExpectedConstantIv(), GetMockEncryptionKey().key_id));
    Mock::VerifyAndClearExpectations(&mock_key_source_);
    ClearOutputStreamDataVector();
  }
}
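
// End-to-end check without clear lead: the sample is encrypted immediately,
// and the test decrypts the handler output with the matching scheme (see
// Decrypt() above) and expects to recover kData unchanged.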
TEST_P(EncryptionHandlerEncryptionTest, Encrypt) {
  EncryptionParams encryption_params;
  encryption_params.protection_scheme = protection_scheme_;
  encryption_params.vp9_subsample_encryption = vp9_subsample_encryption_;
  SetUpEncryptionHandler(encryption_params);

  const EncryptionKey mock_encryption_key = GetMockEncryptionKey();
  EXPECT_CALL(mock_key_source_, GetKey(_, _))
      .WillOnce(
          DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK)));

  if (IsVideoCodec(codec_)) {
    ASSERT_OK(Process(StreamData::FromStreamInfo(
        kStreamIndex, GetVideoStreamInfo(kTimeScale, codec_))));
  } else {
    ASSERT_OK(Process(StreamData::FromStreamInfo(
        kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));
  }

  EXPECT_THAT(
      GetOutputStreamDataVector(),
      ElementsAre(IsStreamInfo(kStreamIndex, kTimeScale, kEncrypted, _)));
  const StreamInfo* stream_info =
      GetOutputStreamDataVector().back()->stream_info.get();
  ASSERT_TRUE(stream_info);
  EXPECT_FALSE(stream_info->has_clear_lead());

  InjectCodecParser();

  ASSERT_OK(Process(StreamData::FromMediaSample(
      kStreamIndex,
      GetMediaSample(0, kSampleDuration, kIsKeyFrame, kData, kDataSize))));
  ASSERT_EQ(2u, GetOutputStreamDataVector().size());
  ASSERT_EQ(kStreamIndex, GetOutputStreamDataVector().back()->stream_index);
  ASSERT_EQ(StreamDataType::kMediaSample,
            GetOutputStreamDataVector().back()->stream_data_type);

  auto* media_sample = GetOutputStreamDataVector().back()->media_sample.get();
  auto* decrypt_config = media_sample->decrypt_config();
  EXPECT_EQ(std::vector<uint8_t>(kKeyId, kKeyId + sizeof(kKeyId)),
            decrypt_config->key_id());
  EXPECT_EQ(std::vector<uint8_t>(kIv, kIv + sizeof(kIv)), decrypt_config->iv());
  EXPECT_EQ(GetExpectedSubsamples(), decrypt_config->subsamples());
  EXPECT_EQ(protection_scheme_, decrypt_config->protection_scheme());
  EXPECT_EQ(GetExpectedCryptByteBlock(), decrypt_config->crypt_byte_block());
  EXPECT_EQ(GetExpectedSkipByteBlock(), decrypt_config->skip_byte_block());

  std::vector<uint8_t> expected(kData, kData + kDataSize);
  std::vector<uint8_t> actual(media_sample->data(),
                              media_sample->data() + media_sample->data_size());
  ASSERT_TRUE(Decrypt(*decrypt_config, actual.data(), actual.size()));
  EXPECT_EQ(expected, actual);
}

// Verify that the data in short audio (less than leading clear bytes) is left
// unencrypted.
TEST_P(EncryptionHandlerEncryptionTest, SampleAesEncryptShortAudio) {
  if (IsVideoCodec(codec_) ||
      protection_scheme_ != kAppleSampleAesProtectionScheme) {
    return;
  }
  EncryptionParams encryption_params;
  encryption_params.protection_scheme = kAppleSampleAesProtectionScheme;
  SetUpEncryptionHandler(encryption_params);

  const EncryptionKey mock_encryption_key = GetMockEncryptionKey();
  EXPECT_CALL(mock_key_source_, GetKey(_, _))
      .WillOnce(
          DoAll(SetArgPointee<1>(mock_encryption_key), Return(Status::OK)));

  ASSERT_OK(Process(StreamData::FromStreamInfo(
      kStreamIndex, GetAudioStreamInfo(kTimeScale, codec_))));

  ASSERT_OK(Process(StreamData::FromMediaSample(
      kStreamIndex,
      GetMediaSample(0, kSampleDuration, kIsKeyFrame, kData, kShortDataSize))));
  ASSERT_EQ(2u, GetOutputStreamDataVector().size());
  ASSERT_EQ(kStreamIndex, GetOutputStreamDataVector().back()->stream_index);
  ASSERT_EQ(StreamDataType::kMediaSample,
            GetOutputStreamDataVector().back()->stream_data_type);

  auto* media_sample = GetOutputStreamDataVector().back()->media_sample.get();
  auto* decrypt_config = media_sample->decrypt_config();
  EXPECT_TRUE(decrypt_config->subsamples().empty());
  EXPECT_EQ(kAppleSampleAesProtectionScheme,
            decrypt_config->protection_scheme());

  std::vector<uint8_t> expected(kData, kData + kShortDataSize);
  std::vector<uint8_t> actual(media_sample->data(),
                              media_sample->data() + media_sample->data_size());
  EXPECT_EQ(expected, actual);
}

INSTANTIATE_TEST_CASE_P(
    CencProtectionSchemes,
    EncryptionHandlerEncryptionTest,
    Combine(Values(FOURCC_cenc, FOURCC_cens, FOURCC_cbc1, FOURCC_cbcs),
            Values(kCodecAAC, kCodecH264, kCodecVP9),
            Values(kVp9SubsampleEncryption, !kVp9SubsampleEncryption)));
INSTANTIATE_TEST_CASE_P(AppleSampleAes,
                        EncryptionHandlerEncryptionTest,
                        Combine(Values(kAppleSampleAesProtectionScheme),
                                Values(kCodecAAC, kCodecH264),
                                Values(kVp9SubsampleEncryption)));

class EncryptionHandlerTrackTypeTest : public EncryptionHandlerTest {
 public:
  void SetUp() override {}
};

TEST_F(EncryptionHandlerTrackTypeTest, AudioTrackType) {
  EncryptionParams::EncryptedStreamAttributes captured_stream_attributes;
  EncryptionParams encryption_params;
  encryption_params.stream_label_func =
      [&captured_stream_attributes](
          const EncryptionParams::EncryptedStreamAttributes&
              stream_attributes) {
        captured_stream_attributes = stream_attributes;
        return kAudioStreamLabel;
      };
  SetUpEncryptionHandler(encryption_params);
  EXPECT_CALL(mock_key_source_, GetKey(kAudioStreamLabel, _))
      .WillOnce(
          DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK)));
  ASSERT_OK(Process(StreamData::FromStreamInfo(
      kStreamIndex, GetAudioStreamInfo(kTimeScale))));
  EXPECT_EQ(EncryptionParams::EncryptedStreamAttributes::kAudio,
            captured_stream_attributes.stream_type);
}

TEST_F(EncryptionHandlerTrackTypeTest, VideoTrackType) {
  const int32_t kWidth = 12;
  const int32_t kHeight = 34;
  EncryptionParams::EncryptedStreamAttributes captured_stream_attributes;
  EncryptionParams encryption_params;
  encryption_params.stream_label_func =
      [&captured_stream_attributes](
          const EncryptionParams::EncryptedStreamAttributes&
              stream_attributes) {
        captured_stream_attributes = stream_attributes;
        return kSdVideoStreamLabel;
      };
  SetUpEncryptionHandler(encryption_params);
  EXPECT_CALL(mock_key_source_, GetKey(kSdVideoStreamLabel, _))
      .WillOnce(
          DoAll(SetArgPointee<1>(GetMockEncryptionKey()), Return(Status::OK)));
  ASSERT_OK(Process(StreamData::FromStreamInfo(
      kStreamIndex, GetVideoStreamInfo(kTimeScale, kWidth, kHeight))));
  EXPECT_EQ(EncryptionParams::EncryptedStreamAttributes::kVideo,
            captured_stream_attributes.stream_type);
  EXPECT_EQ(captured_stream_attributes.oneof.video.width, kWidth);
  EXPECT_EQ(captured_stream_attributes.oneof.video.height, kHeight);
}

}  // namespace media
}  // namespace shaka