Change CueEvent to use time in seconds

Change-Id: I03527db76905f25f0a142940f5fcf73e2a92d42f
This commit is contained in:
Aaron Vaage 2018-03-15 16:01:47 -07:00 committed by KongQun Yang
parent f2f88c159e
commit d04f0ef00f
5 changed files with 40 additions and 42 deletions

View File

@ -45,8 +45,8 @@ enum class CueEventType { kCueIn, kCueOut, kCuePoint };
// In server-based model, Chunking Handler consolidates SCTE-35 events and
// generates CueEvent before an ad is about to be inserted.
struct CueEvent {
  // Kind of cue; defaults to a point (zero-duration) cue.
  CueEventType type = CueEventType::kCuePoint;
  // Event time in seconds on the stream's presentation timeline. Stored in
  // seconds (not timescale units) so the same event can be shared across
  // streams with different timescales. Zero-initialized so a default
  // constructed event has a determinate value.
  double time_in_seconds = 0;
  // Raw cue payload carried over from the originating SCTE-35 event.
  std::string cue_data;
};

View File

@ -139,16 +139,16 @@ MATCHER_P5(IsTextSample, id, start_time, end_time, settings, payload, "") {
arg->text_sample->payload() == payload;
}
// Matches a StreamData carrying a CueEvent with the given stream index and
// time in seconds.
// NOTE(review): exact double equality is intentional here — tests pass values
// that are exactly representable; confirm if fuzzy comparison is ever needed.
MATCHER_P2(IsCueEvent, stream_index, time_in_seconds, "") {
  if (arg->stream_data_type != StreamDataType::kCueEvent) {
    *result_listener << "which is "
                     << StreamDataTypeToString(arg->stream_data_type);
    return false;
  }
  *result_listener << "which is (" << arg->stream_index << ","
                   << arg->cue_event->time_in_seconds << ")";
  return arg->stream_index == stream_index &&
         arg->cue_event->time_in_seconds == time_in_seconds;
}
class FakeInputMediaHandler : public MediaHandler {

View File

@ -70,11 +70,15 @@ Status Muxer::Process(std::unique_ptr<StreamData> stream_data) {
return FinalizeSegment(stream_data->stream_index, segment_info);
}
case StreamDataType::kMediaSample:
return AddSample(stream_data->stream_index,
*stream_data->media_sample);
return AddSample(stream_data->stream_index, *stream_data->media_sample);
case StreamDataType::kCueEvent:
if (muxer_listener_) {
muxer_listener_->OnCueEvent(stream_data->cue_event->timestamp,
const int64_t time_scale =
streams_[stream_data->stream_index]->time_scale();
const double time_in_seconds = stream_data->cue_event->time_in_seconds;
const int64_t scaled_time =
static_cast<int64_t>(time_in_seconds * time_scale);
muxer_listener_->OnCueEvent(scaled_time,
stream_data->cue_event->cue_data);
}
break;

View File

@ -12,12 +12,15 @@
#include "packager/base/threading/platform_thread.h"
#include "packager/media/base/media_sample.h"
namespace shaka {
namespace media {
namespace {

// Sentinel value meaning "no thread id has been recorded yet".
// NOTE(review): presumably only ever read after initialization; made const to
// enforce that — confirm no assignment elsewhere in this file.
const int64_t kThreadIdUnset = -1;

// Converts an SCTE-35 event's start time from media timescale units to
// seconds.
double TimeInSeconds(const Scte35Event& event, int64_t timescale) {
  return static_cast<double>(event.start_time) / timescale;
}

}  // namespace
ChunkingHandler::ChunkingHandler(const ChunkingParams& chunking_params)
: chunking_params_(chunking_params),
@ -179,6 +182,9 @@ Status ChunkingHandler::OnMediaSample(std::unique_ptr<StreamData> stream_data) {
Status ChunkingHandler::ProcessMainMediaSample(const MediaSample* sample) {
const bool is_key_frame = sample->is_key_frame();
const int64_t timestamp = sample->dts();
const int64_t time_scale = time_scales_[main_stream_index_];
const double dts_in_seconds = static_cast<double>(sample->dts()) / time_scale;
// Check if we need to terminate the current (sub)segment.
bool new_segment = false;
bool new_subsegment = false;
@ -194,16 +200,16 @@ Status ChunkingHandler::ProcessMainMediaSample(const MediaSample* sample) {
// We use 'while' instead of 'if' to make sure to pop off multiple SCTE35
// events that may be very close to each other.
while (!scte35_events_.empty() &&
(scte35_events_.top()->start_time <= timestamp)) {
TimeInSeconds(*scte35_events_.top(), time_scale) <= dts_in_seconds) {
// For simplicity, don't change |current_segment_index_|.
current_subsegment_index_ = 0;
new_segment = true;
cue_event = std::make_shared<CueEvent>();
// Use PTS instead of DTS for cue event timestamp.
cue_event->timestamp = sample->pts();
cue_event->time_in_seconds =
static_cast<double>(sample->pts()) / time_scale;
cue_event->cue_data = scte35_events_.top()->cue_data;
LOG(INFO) << "Chunked at " << timestamp << " for Ad Cue.";
LOG(INFO) << "Chunked at " << dts_in_seconds << " seconds for Ad Cue.";
scte35_events_.pop();
}
@ -298,10 +304,7 @@ Status ChunkingHandler::DispatchCueEventForAllStreams(
std::shared_ptr<CueEvent> cue_event) {
Status status;
for (size_t i = 0; i < segment_info_.size() && status.ok(); ++i) {
std::shared_ptr<CueEvent> new_cue_event(new CueEvent(*cue_event));
new_cue_event->timestamp = cue_event->timestamp * time_scales_[i] /
time_scales_[main_stream_index_];
status.Update(DispatchCueEvent(i, std::move(new_cue_event)));
status.Update(DispatchCueEvent(i, cue_event));
}
return status;
}

View File

@ -65,8 +65,7 @@ TEST_F(ChunkingHandlerTest, AudioNoSubsegmentsThenFlush) {
for (int i = 0; i < 5; ++i) {
ClearOutputStreamDataVector();
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
// One output stream_data except when i == 3, which also has SegmentInfo.
if (i == 3) {
EXPECT_THAT(GetOutputStreamDataVector(),
@ -99,8 +98,7 @@ TEST_F(ChunkingHandlerTest, AudioWithSubsegments) {
kStreamIndex0, GetAudioStreamInfo(kTimeScale0))));
for (int i = 0; i < 5; ++i) {
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
kStreamIndex0, GetMediaSample(i * kDuration1, kDuration1, kKeyFrame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
@ -131,11 +129,8 @@ TEST_F(ChunkingHandlerTest, VideoAndSubsegmentAndNonzeroStart) {
// Alternate key frame.
const bool is_key_frame = (i % 2) == 1;
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(
kVideoStartTimestamp + i * kDuration1,
kDuration1,
is_key_frame))));
kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
kDuration1, is_key_frame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
@ -224,22 +219,16 @@ TEST_F(ChunkingHandlerTest, AudioAndVideo) {
// The audio and video are made ~aligned.
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(
kAudioStartTimestamp + kDuration0 * 5,
kDuration0,
true)))); // 13595
GetMediaSample(kAudioStartTimestamp + kDuration0 * 5, kDuration0,
true)))); // 13595
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex1,
GetMediaSample(
kVideoStartTimestamp + kDuration1 * 5,
kDuration1,
true)))); // 13845
GetMediaSample(kVideoStartTimestamp + kDuration1 * 5, kDuration1,
true)))); // 13845
ASSERT_OK(Process(StreamData::FromMediaSample(
kStreamIndex0,
GetMediaSample(
kAudioStartTimestamp + kDuration0 * 6,
kDuration0,
true)))); // 13845
GetMediaSample(kAudioStartTimestamp + kDuration0 * 6, kDuration0,
true)))); // 13845
// These expectations are separated from the ones above because
// ElementsAre supports at most 10 elements.
EXPECT_THAT(
@ -304,6 +293,7 @@ TEST_F(ChunkingHandlerTest, Scte35Event) {
kStreamIndex0, GetMediaSample(kVideoStartTimestamp + i * kDuration1,
kDuration1, is_key_frame))));
}
EXPECT_THAT(
GetOutputStreamDataVector(),
ElementsAre(
@ -313,8 +303,9 @@ TEST_F(ChunkingHandlerTest, Scte35Event) {
// A new segment is created due to the existence of the Cue.
IsSegmentInfo(kStreamIndex0, kVideoStartTimestamp, kDuration1,
!kIsSubsegment, !kEncrypted),
IsCueEvent(kStreamIndex0,
static_cast<double>(kVideoStartTimestamp + kDuration1)),
IsCueEvent(
kStreamIndex0,
static_cast<double>(kVideoStartTimestamp + kDuration1) / 1000),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 1,
kDuration1, !kEncrypted),
IsMediaSample(kStreamIndex0, kVideoStartTimestamp + kDuration1 * 2,