Change CueEvent to use time in seconds
Change-Id: I03527db76905f25f0a142940f5fcf73e2a92d42f
parent f2f88c159e
commit d04f0ef00f
@@ -45,8 +45,8 @@ enum class CueEventType { kCueIn, kCueOut, kCuePoint };
 // In server-based model, Chunking Handler consolidates SCTE-35 events and
 // generates CueEvent before an ad is about to be inserted.
 struct CueEvent {
-  int64_t timestamp = 0;
   CueEventType type = CueEventType::kCuePoint;
+  double time_in_seconds;
   std::string cue_data;
 };
 
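
The cue time is now stored as seconds in a double rather than as a timescale-dependent tick count, so the same value is meaningful for every stream. A minimal sketch of the two conversions the rest of this commit performs (the helper names are illustrative, not taken from the codebase):

#include <cstdint>

// Media time in ticks of a given timescale -> seconds, as done where the
// CueEvent is created from a sample's PTS.
double TicksToSeconds(int64_t ticks, int64_t timescale) {
  return static_cast<double>(ticks) / timescale;
}

// Seconds -> ticks, as done where a muxer listener still expects scaled time.
int64_t SecondsToTicks(double seconds, int64_t timescale) {
  return static_cast<int64_t>(seconds * timescale);
}

// Example: TicksToSeconds(225000, 90000) == 2.5 and
// SecondsToTicks(2.5, 90000) == 225000.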
@@ -139,16 +139,16 @@ MATCHER_P5(IsTextSample, id, start_time, end_time, settings, payload, "") {
          arg->text_sample->payload() == payload;
 }
 
-MATCHER_P2(IsCueEvent, stream_index, timestamp, "") {
+MATCHER_P2(IsCueEvent, stream_index, time_in_seconds, "") {
   if (arg->stream_data_type != StreamDataType::kCueEvent) {
     *result_listener << "which is "
                      << StreamDataTypeToString(arg->stream_data_type);
     return false;
   }
   *result_listener << "which is (" << arg->stream_index << ","
-                   << arg->cue_event->timestamp << ")";
+                   << arg->cue_event->time_in_seconds << ")";
   return arg->stream_index == stream_index &&
-         arg->cue_event->timestamp == timestamp;
+         arg->cue_event->time_in_seconds == time_in_seconds;
 }
 
 class FakeInputMediaHandler : public MediaHandler {
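
With the matcher parameterized on seconds, expectations pass a double instead of a scaled timestamp. A hedged usage fragment (it assumes the gMock setup and constants of this test base; the numbers are illustrative):

// With a 1000-tick timescale and a cue at media time 1500, the expected
// value is 1500 / 1000 = 1.5 seconds, so the matcher's == comparison holds.
EXPECT_THAT(GetOutputStreamDataVector(),
            ElementsAre(IsCueEvent(kStreamIndex0, 1.5)));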
@@ -70,11 +70,15 @@ Status Muxer::Process(std::unique_ptr<StreamData> stream_data) {
       return FinalizeSegment(stream_data->stream_index, segment_info);
     }
     case StreamDataType::kMediaSample:
-      return AddSample(stream_data->stream_index,
-                       *stream_data->media_sample);
+      return AddSample(stream_data->stream_index, *stream_data->media_sample);
     case StreamDataType::kCueEvent:
       if (muxer_listener_) {
-        muxer_listener_->OnCueEvent(stream_data->cue_event->timestamp,
+        const int64_t time_scale =
+            streams_[stream_data->stream_index]->time_scale();
+        const double time_in_seconds = stream_data->cue_event->time_in_seconds;
+        const int64_t scaled_time =
+            static_cast<int64_t>(time_in_seconds * time_scale);
+        muxer_listener_->OnCueEvent(scaled_time,
                                     stream_data->cue_event->cue_data);
       }
       break;
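
The muxer converts the seconds value back into the output stream's timescale before notifying the listener, since MuxerListener::OnCueEvent (as called here) still takes a scaled timestamp. Isolating that arithmetic, with an illustrative timescale:

#include <cstdint>

// Seconds * timescale, truncated toward zero by the static_cast, exactly as
// in the case above (the example timescale is an assumption).
int64_t ScaledCueTime(double time_in_seconds, int64_t time_scale) {
  return static_cast<int64_t>(time_in_seconds * time_scale);
}

// Example: a cue at 2.5 s in a 90000-tick timescale gives
// ScaledCueTime(2.5, 90000) == 225000.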
@@ -12,12 +12,15 @@
 #include "packager/base/threading/platform_thread.h"
 #include "packager/media/base/media_sample.h"
 
-namespace {
-int64_t kThreadIdUnset = -1;
-}  // namespace
-
 namespace shaka {
 namespace media {
+namespace {
+int64_t kThreadIdUnset = -1;
+
+double TimeInSeconds(const Scte35Event& event, int64_t timescale) {
+  return static_cast<double>(event.start_time) / timescale;
+}
+}  // namespace
 
 ChunkingHandler::ChunkingHandler(const ChunkingParams& chunking_params)
     : chunking_params_(chunking_params),
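
The anonymous namespace moves inside shaka::media and gains a TimeInSeconds helper that converts an SCTE-35 event's start_time, presumably expressed in the main stream's timescale, into seconds. A self-contained sketch with a stand-in event type (the real Scte35Event has more fields):

#include <cstdint>

// Stand-in for the real Scte35Event; only the field the helper reads.
struct Scte35Event {
  int64_t start_time = 0;
};

double TimeInSeconds(const Scte35Event& event, int64_t timescale) {
  return static_cast<double>(event.start_time) / timescale;
}

// Example: an event with start_time 45000 at a 90000-tick timescale is
// TimeInSeconds(event, 90000) == 0.5 seconds.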
@@ -179,6 +182,9 @@ Status ChunkingHandler::OnMediaSample(std::unique_ptr<StreamData> stream_data) {
 Status ChunkingHandler::ProcessMainMediaSample(const MediaSample* sample) {
   const bool is_key_frame = sample->is_key_frame();
   const int64_t timestamp = sample->dts();
+  const int64_t time_scale = time_scales_[main_stream_index_];
+  const double dts_in_seconds = static_cast<double>(sample->dts()) / time_scale;
+
   // Check if we need to terminate the current (sub)segment.
   bool new_segment = false;
   bool new_subsegment = false;
@@ -194,16 +200,16 @@ Status ChunkingHandler::ProcessMainMediaSample(const MediaSample* sample) {
   // We use 'while' instead of 'if' to make sure to pop off multiple SCTE35
   // events that may be very close to each other.
   while (!scte35_events_.empty() &&
-         (scte35_events_.top()->start_time <= timestamp)) {
+         TimeInSeconds(*scte35_events_.top(), time_scale) <= dts_in_seconds) {
     // For simplicity, don't change |current_segment_index_|.
     current_subsegment_index_ = 0;
     new_segment = true;
 
     cue_event = std::make_shared<CueEvent>();
-    // Use PTS instead of DTS for cue event timestamp.
-    cue_event->timestamp = sample->pts();
+    cue_event->time_in_seconds =
+        static_cast<double>(sample->pts()) / time_scale;
     cue_event->cue_data = scte35_events_.top()->cue_data;
-    LOG(INFO) << "Chunked at " << timestamp << " for Ad Cue.";
+    LOG(INFO) << "Chunked at " << dts_in_seconds << " seconds for Ad Cue.";
 
     scte35_events_.pop();
   }
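
Comparing the SCTE-35 event time and the sample DTS in seconds is equivalent to the old tick comparison, because both sides are divided by the same positive timescale; only floating-point rounding could differ at the margin. A small standalone check of that equivalence (values are illustrative):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t timescale = 90000;           // illustrative
  const int64_t scte35_start_time = 180000;  // 2.0 s in ticks
  const int64_t dts = 180000;                // 2.0 s in ticks

  const bool tick_comparison = scte35_start_time <= dts;
  const bool seconds_comparison =
      static_cast<double>(scte35_start_time) / timescale <=
      static_cast<double>(dts) / timescale;
  assert(tick_comparison == seconds_comparison);
  return 0;
}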
@@ -298,10 +304,7 @@ Status ChunkingHandler::DispatchCueEventForAllStreams(
     std::shared_ptr<CueEvent> cue_event) {
   Status status;
   for (size_t i = 0; i < segment_info_.size() && status.ok(); ++i) {
-    std::shared_ptr<CueEvent> new_cue_event(new CueEvent(*cue_event));
-    new_cue_event->timestamp = cue_event->timestamp * time_scales_[i] /
-                               time_scales_[main_stream_index_];
-    status.Update(DispatchCueEvent(i, std::move(new_cue_event)));
+    status.Update(DispatchCueEvent(i, cue_event));
   }
   return status;
 }
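
Because seconds are timescale-independent, the per-stream copy-and-rescale is no longer needed and the same shared CueEvent can be dispatched to every stream. For reference, this is the arithmetic the removed lines performed (values below are illustrative):

#include <cstdint>

// Rescale a cue timestamp from the main stream's timescale to stream i's
// timescale -- the step this commit makes unnecessary.
int64_t RescaleToStream(int64_t cue_timestamp,
                        int64_t main_time_scale,
                        int64_t stream_time_scale) {
  return cue_timestamp * stream_time_scale / main_time_scale;
}

// Example: RescaleToStream(90000, 90000, 1000) == 1000 -- the same instant
// that a single value of 1.0 second now expresses for every stream.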
@@ -65,8 +65,7 @@ TEST_F(ChunkingHandlerTest, AudioNoSubsegmentsThenFlush) {
   for (int i = 0; i < 5; ++i) {
     ClearOutputStreamDataVector();
     ASSERT_OK(Process(StreamData::FromMediaSample(
-        kStreamIndex0,
-        GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
+        kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
     // One output stream_data except when i == 3, which also has SegmentInfo.
     if (i == 3) {
       EXPECT_THAT(GetOutputStreamDataVector(),
@@ -99,8 +98,7 @@ TEST_F(ChunkingHandlerTest, AudioWithSubsegments) {
       kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
   for (int i = 0; i < 5; ++i) {
     ASSERT_OK(Process(StreamData::FromMediaSample(
-        kStreamIndex0,
-        GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
+        kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
   }
   EXPECT_THAT(
       GetOutputStreamDataVector(),
@@ -131,11 +129,8 @@ TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
     // Alternate key frame.
     const bool is_key_frame = (i % 2) == 1;
     ASSERT_OK(Process(StreamData::FromMediaSample(
-        kStreamIndex0,
-        GetMediaSample(
-            kVideoStartTimestamp + i * kDuration1,
-            kDuration1,
-            is_key_frame))));
+        kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
+                                      kDuration1, is_key_frame))));
   }
   EXPECT_THAT(
       GetOutputStreamDataVector(),
@@ -224,22 +219,16 @@ TEST_F(ChunkingHandlerTest, AudioAndVideo) {
   // The audio and video are made ~aligned.
   ASSERT_OK(Process(StreamData::FromMediaSample(
       kStreamIndex0,
-      GetMediaSample(
-          kAudioStartTimestamp + kDuration0 * 5,
-          kDuration0,
-          true))));  // 13595
+      GetMediaSample(kAudioStartTimestamp + kDuration0 * 5, kDuration0,
+                     true))));  // 13595
   ASSERT_OK(Process(StreamData::FromMediaSample(
       kStreamIndex1,
-      GetMediaSample(
-          kVideoStartTimestamp + kDuration1 * 5,
-          kDuration1,
-          true))));  // 13845
+      GetMediaSample(kVideoStartTimestamp + kDuration1 * 5, kDuration1,
+                     true))));  // 13845
   ASSERT_OK(Process(StreamData::FromMediaSample(
       kStreamIndex0,
-      GetMediaSample(
-          kAudioStartTimestamp + kDuration0 * 6,
-          kDuration0,
-          true))));  // 13845
+      GetMediaSample(kAudioStartTimestamp + kDuration0 * 6, kDuration0,
+                     true))));  // 13845
   // This expectation are separated from the expectation above because
   // ElementsAre supports at most 10 elements.
   EXPECT_THAT(
@@ -304,6 +293,7 @@ TEST_F(ChunkingHandlerTest, Scte35Event) {
         kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
                                       kDuration1, is_key_frame))));
   }
+
   EXPECT_THAT(
       GetOutputStreamDataVector(),
       ElementsAre(
@@ -313,8 +303,9 @@ TEST_F(ChunkingHandlerTest, Scte35Event) {
           // A new segment is created due to the existance of Cue.
           IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp, kDuration1,
                         !kIsSubsegment, !kEncrypted),
-          IsCueEvent(kStreamIndex0,
-                     static_cast<double>(kVideoStartTimestamp + kDuration1)),
+          IsCueEvent(
+              kStreamIndex0,
+              static_cast<double>(kVideoStartTimestamp + kDuration1) / 1000),
           IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 1,
                         kDuration1, !kEncrypted),
           IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,
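
The expected cue time is now divided by 1000, the test stream's timescale, so the matcher receives seconds. As a worked example with hypothetical constants chosen to be consistent with the "// 13595" / "// 13845" comments earlier in this file (they are not stated in this hunk):

#include <cstdio>

int main() {
  // Hypothetical values: kVideoStartTimestamp = 12345, kDuration1 = 300,
  // timescale = 1000.
  const double expected_cue_seconds = static_cast<double>(12345 + 300) / 1000;
  std::printf("expected cue time: %.3f s\n", expected_cue_seconds);  // 12.645
  return 0;
}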