Ignore extra ad cues at the end of the stream
Extra cues would result in empty DASH Representations, which is not spec compliant. For text streams, a cue that falls before the maximum text-sample end time is still dispatched, because the text samples intersected by the cue can be split into two at the cue point. Bug: 111359775. Change-Id: I55c2025c4e9d64c88e6a685c0cf3024a0cc4a6d8
This commit is contained in:
parent
86a86ce954
commit
9a26ec1e83
|
@ -6,6 +6,8 @@
|
|||
|
||||
#include "packager/media/chunking/cue_alignment_handler.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "packager/status_macros.h"
|
||||
|
||||
namespace shaka {
|
||||
|
@ -49,6 +51,15 @@ double TimeInSeconds(const StreamInfo& info, const StreamData& data) {
|
|||
return static_cast<double>(scaled_time) / time_scale;
|
||||
}
|
||||
|
||||
double TextEndTimeInSeconds(const StreamInfo& info, const StreamData& data) {
|
||||
DCHECK(data.text_sample);
|
||||
|
||||
const int64_t scaled_time = data.text_sample->EndTime();
|
||||
const uint32_t time_scale = info.time_scale();
|
||||
|
||||
return static_cast<double>(scaled_time) / time_scale;
|
||||
}
|
||||
|
||||
Status GetNextCue(double hint,
|
||||
SyncPointQueue* sync_points,
|
||||
std::shared_ptr<const CueEvent>* out_cue) {
|
||||
|
@ -132,12 +143,23 @@ Status CueAlignmentHandler::OnFlushRequest(size_t stream_index) {
|
|||
RETURN_IF_ERROR(RunThroughSamples(&stream));
|
||||
DCHECK_EQ(stream.samples.size(), 0u);
|
||||
|
||||
// It is possible for there to be cues that come after all the samples. Make
|
||||
// sure to send them out too.
|
||||
while (stream.cues.size()) {
|
||||
RETURN_IF_ERROR(Dispatch(std::move(stream.cues.front())));
|
||||
stream.cues.pop_front();
|
||||
// Ignore extra cues at the end, except for text, as they will result in
|
||||
// empty DASH Representations, which is not spec compliant.
|
||||
// For text, if the cue is before the max end time, it will still be
|
||||
// dispatched as the text samples intercepted by the cue can be split into
|
||||
// two at the cue point.
|
||||
for (auto& cue : stream.cues) {
|
||||
// |max_text_sample_end_time_seconds| is always 0 for non-text samples.
|
||||
if (cue->cue_event->time_in_seconds <
|
||||
stream.max_text_sample_end_time_seconds) {
|
||||
RETURN_IF_ERROR(Dispatch(std::move(cue)));
|
||||
} else {
|
||||
VLOG(1) << "Ignore extra cue in stream " << cue->stream_index
|
||||
<< " with time " << cue->cue_event->time_in_seconds
|
||||
<< "s in the end.";
|
||||
}
|
||||
}
|
||||
stream.cues.clear();
|
||||
}
|
||||
|
||||
return FlushAllDownstreams();
|
||||
|
@ -218,9 +240,16 @@ Status CueAlignmentHandler::OnSample(std::unique_ptr<StreamData> sample) {
|
|||
// us until there is a sync point.
|
||||
|
||||
const size_t stream_index = sample->stream_index;
|
||||
|
||||
if (sample->text_sample) {
|
||||
StreamState& stream = stream_states_[stream_index];
|
||||
stream.max_text_sample_end_time_seconds =
|
||||
std::max(stream.max_text_sample_end_time_seconds,
|
||||
TextEndTimeInSeconds(*stream.info, *sample));
|
||||
}
|
||||
|
||||
const StreamType stream_type =
|
||||
stream_states_[stream_index].info->stream_type();
|
||||
|
||||
const bool is_video = stream_type == kStreamVideo;
|
||||
|
||||
return is_video ? OnVideoSample(std::move(sample))
|
||||
|
|
|
@ -39,6 +39,8 @@ class CueAlignmentHandler : public MediaHandler {
|
|||
std::list<std::unique_ptr<StreamData>> samples;
|
||||
// If set, the stream is pending to be flushed.
|
||||
bool to_be_flushed = false;
|
||||
// Only set for text stream.
|
||||
double max_text_sample_end_time_seconds = 0;
|
||||
|
||||
// A list of cues that the stream should inject between media samples. When
|
||||
// there are no cues, the stream should run up to the hint.
|
||||
|
|
|
@ -474,11 +474,15 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCueAfterLastStart) {
|
|||
const int64_t kSample2Start = kSample1End;
|
||||
const int64_t kSample2End = kSample2Start + kSampleDuration;
|
||||
|
||||
// Put the cue between the start and end of the last sample.
|
||||
const double kCueTimeInSeconds =
|
||||
static_cast<double>(kSample2Start + kSample2End) / kMsTimeScale;
|
||||
// Put the cue 1 between the start and end of the last sample.
|
||||
const double kCue1TimeInSeconds =
|
||||
static_cast<double>(kSample2Start + kSample2End) / 2 / kMsTimeScale;
|
||||
|
||||
auto sync_points = CreateSyncPoints({kCueTimeInSeconds});
|
||||
// Put the cue 2 after the end of the last sample.
|
||||
const double kCue2TimeInSeconds =
|
||||
static_cast<double>(kSample2End) / kMsTimeScale;
|
||||
|
||||
auto sync_points = CreateSyncPoints({kCue1TimeInSeconds, kCue2TimeInSeconds});
|
||||
auto handler = std::make_shared<CueAlignmentHandler>(sync_points.get());
|
||||
ASSERT_OK(SetUpAndInitializeGraph(handler, kOneInput, kOneOutput));
|
||||
|
||||
|
@ -496,8 +500,10 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCueAfterLastStart) {
|
|||
EXPECT_CALL(
|
||||
*Output(kTextStream),
|
||||
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
|
||||
// Cue before the sample end is processed.
|
||||
EXPECT_CALL(*Output(kTextStream),
|
||||
OnProcess(IsCueEvent(_, kCueTimeInSeconds)));
|
||||
OnProcess(IsCueEvent(_, kCue1TimeInSeconds)));
|
||||
// Cue after the samples is ignored.
|
||||
EXPECT_CALL(*Output(kTextStream), OnFlush(_));
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue