Start Segments At First Sample in Text Chunker
We always assumed that the text stream started at time zero in the text chunker. This meant that if the text stream actually started later than zero (as in a live stream), we would generate a lot of empty segments. Instead, the text chunker now assumes that segments should be adjusted to align with multiples of the segment duration. The text chunker assumes that an earlier component in the pipeline (i.e. the text padder) will ensure that the first sample carries the time at which we want segments to start (even if it needs to add an empty sample to do so).

Issue #416

Change-Id: Ie45844354d6e9448787cae896841b5ab31721ed6
parent f21d93dd65, commit 5f072e3b08
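The change boils down to a floor alignment onto the segment grid. Below is a minimal, self-contained C++ sketch of that arithmetic; the AlignedSegmentStart helper and the printed values are illustrative only (not part of the packager API), and the numbers come from the new SegmentsStartAtFirstSample test, where a first sample at 120 ms with 100 ms segments should give a first segment starting at 100 ms.

#include <cstdint>
#include <iostream>

// Hypothetical helper mirroring the alignment described above: snap a sample's
// start time down to the nearest multiple of the segment duration. Both values
// are in the stream's time scale.
int64_t AlignedSegmentStart(int64_t sample_start, int64_t segment_duration) {
  return (sample_start / segment_duration) * segment_duration;
}

int main() {
  const int64_t kSegmentDurationMs = 100;  // Segment duration from the test.
  const int64_t kSampleAStartMs = 120;     // First sample's start time.

  // Prints 100: the first segment starts at the grid boundary just before the
  // sample, so segments produced by different streams line up.
  std::cout << AlignedSegmentStart(kSampleAStartMs, kSegmentDurationMs) << "\n";
  return 0;
}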
@@ -73,7 +73,20 @@ Status TextChunker::OnCueEvent(std::shared_ptr<const CueEvent> event) {
 Status TextChunker::OnTextSample(std::shared_ptr<const TextSample> sample) {
   // Output all segments that come before our new sample.
   const int64_t sample_start = sample->start_time();
+
+  // If we have not seen a sample yet, base all segments off the first sample's
+  // start time.
+  if (segment_start_ < 0) {
+    // Force the first segment to start at the segment that would have started
+    // before the sample. This should allow segments from different streams to
+    // align.
+    segment_start_ = (sample_start / segment_duration_) * segment_duration_;
+  }
+
+  // We need to write all the segments that would have ended before the new
+  // sample started.
   while (sample_start >= segment_start_ + segment_duration_) {
+    // |DispatchSegment| will advance |segment_start_|.
     RETURN_IF_ERROR(DispatchSegment(segment_duration_));
   }
 
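To see why the old default of segment_start_ = 0 produced the empty segments mentioned in the commit message, here is a small standalone sketch (hypothetical helper and illustrative numbers, not packager code) that counts how many segments the dispatch loop above would emit before the first sample arrives, under the old and the new starting points.

#include <cstdint>
#include <iostream>

// Count the segments that would be dispatched before |sample_start| is
// reached, mimicking the while loop in OnTextSample above.
int64_t SegmentsBeforeFirstSample(int64_t sample_start, int64_t segment_start,
                                  int64_t segment_duration) {
  int64_t count = 0;
  while (sample_start >= segment_start + segment_duration) {
    segment_start += segment_duration;  // Stands in for DispatchSegment().
    ++count;
  }
  return count;
}

int main() {
  // A live stream whose first caption arrives one hour in, with 5 second
  // segments, both in milliseconds (illustrative numbers only).
  const int64_t kFirstSampleMs = 60 * 60 * 1000;
  const int64_t kSegmentDurationMs = 5000;

  // Old behavior: segments were assumed to start at 0, so 720 empty segments
  // would be dispatched before the first sample.
  std::cout << SegmentsBeforeFirstSample(kFirstSampleMs, 0, kSegmentDurationMs)
            << "\n";

  // New behavior: the first segment is aligned to the first sample, so nothing
  // is dispatched before it.
  const int64_t aligned =
      (kFirstSampleMs / kSegmentDurationMs) * kSegmentDurationMs;
  std::cout << SegmentsBeforeFirstSample(kFirstSampleMs, aligned,
                                         kSegmentDurationMs)
            << "\n";
  return 0;
}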
@@ -49,8 +49,8 @@ class TextChunker : public MediaHandler {
   int64_t time_scale_ = -1;  // Set in OnStreamInfo
 
   // Time values are in scaled units.
-  int64_t segment_start_ = 0;
-  int64_t segment_duration_ = -1;  // Set in OnStreamInfo
+  int64_t segment_start_ = -1;  // Set when the first sample comes in.
+  int64_t segment_duration_ = -1;  // Set in OnStreamInfo.
 
   // All samples that make up the current segment. We must store the samples
   // until the segment ends because a cue event may end the segment sooner
@@ -38,12 +38,59 @@ const char* kNoPayload = "";
 
 class TextChunkerTest : public MediaHandlerTestBase {
  protected:
-  void Init(double segment_duration) {
-    ASSERT_OK(SetUpAndInitializeGraph(
-        std::make_shared<TextChunker>(segment_duration), kInputs, kOutputs));
+  Status Init(double segment_duration) {
+    return SetUpAndInitializeGraph(
+        std::make_shared<TextChunker>(segment_duration), kInputs, kOutputs);
   }
 };
 
+// Verify that the chunker will use the first sample's start time as the start
+// time for the first segment.
+//
+// Segment Duration = 100 MS
+//
+// TIME (ms):0 5 1 1 2 2 3
+// 0 0 5 0 5 0
+// 0 0 0 0 0
+// SAMPLES : [-----A-----]
+// SEGMENTS : ^ ^ ^
+//
+TEST_F(TextChunkerTest, SegmentsStartAtFirstSample) {
+  const double kSegmentDurationSec = 0.1;
+  const int64_t kSegmentDurationMs = 100;
+  const int64_t kSegment0Start = 100;
+  const int64_t kSegment1Start = 200;
+
+  const int64_t kSampleAStart = 120;
+  const int64_t kSampleAEnd = 220;
+
+  ASSERT_OK(Init(kSegmentDurationSec));
+
+  {
+    testing::InSequence s;
+
+    EXPECT_CALL(*Output(kOutput), OnProcess(IsStreamInfo(_, _, _, _)));
+    EXPECT_CALL(*Output(kOutput), OnProcess(IsTextSample(_, _, kSampleAStart,
+                                                         kSampleAEnd, _, _)));
+    EXPECT_CALL(
+        *Output(kOutput),
+        OnProcess(IsSegmentInfo(_, kSegment0Start, kSegmentDurationMs, _, _)));
+    EXPECT_CALL(*Output(kOutput), OnProcess(IsTextSample(_, _, kSampleAStart,
+                                                         kSampleAEnd, _, _)));
+    EXPECT_CALL(
+        *Output(kOutput),
+        OnProcess(IsSegmentInfo(_, kSegment1Start, kSegmentDurationMs, _, _)));
+    EXPECT_CALL(*Output(kOutput), OnFlush(kStreamIndex));
+  }
+
+  ASSERT_OK(Input(kInput)->Dispatch(StreamData::FromStreamInfo(
+      kStreamIndex, GetTextStreamInfo(kMsTimeScale))));
+  ASSERT_OK(Input(kInput)->Dispatch(StreamData::FromTextSample(
+      kStreamIndex,
+      GetTextSample(kNoId, kSampleAStart, kSampleAEnd, kNoPayload))));
+  ASSERT_OK(Input(kInput)->FlushAllDownstreams());
+}
+
 // Verify that when a sample elapses a full segment, that it only appears
 // in the one segment.
 //
@@ -98,7 +145,7 @@ TEST_F(TextChunkerTest, SampleEndingOnSegmentStart) {
 // TIME (ms):0 5 1 1 2
 // 0 0 5 0
 // 0 0
-// SAMPLES : [--A--]
+// SAMPLES :[--A--]
 // [--B--]
 // SEGMENTS : ^ ^
 //
@@ -109,11 +156,11 @@ TEST_F(TextChunkerTest, CreatesSegmentsForSamples) {
   const int64_t kSegment0Start = 0;
   const int64_t kSegment1Start = 100;
 
-  const int64_t kSampleAStart = 25;
-  const int64_t kSampleAEnd = 75;
+  const int64_t kSampleAStart = 0;
+  const int64_t kSampleAEnd = 50;
 
-  const int64_t kSampleBStart = 125;
-  const int64_t kSampleBEnd = 175;
+  const int64_t kSampleBStart = 100;
+  const int64_t kSampleBEnd = 150;
 
   Init(kSegmentDurationSec);
 
@@ -163,7 +210,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForSamples) {
 // TIME (ms):0 5 1 1 2 2 3
 // 0 0 5 0 5 0
 // 0 0 0 0 0
-// SAMPLES : [--A--]
+// SAMPLES :[--A--]
 // [--B--]
 // SEGMENTS : ^ ^ ^
 //
@@ -175,11 +222,11 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
   const int64_t kSegment1Start = 100;
   const int64_t kSegment2Start = 200;
 
-  const int64_t kSampleAStart = 25;
-  const int64_t kSampleAEnd = 75;
+  const int64_t kSampleAStart = 0;
+  const int64_t kSampleAEnd = 50;
 
-  const int64_t kSampleBStart = 225;
-  const int64_t kSampleBEnd = 275;
+  const int64_t kSampleBStart = 200;
+  const int64_t kSampleBEnd = 250;
 
   Init(kSegmentDurationSec);
 
@@ -235,7 +282,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
 // TIME (ms):0 5 1 1
 // 0 0 5
 // 0 0
-// SAMPLES : [-----A-----]
+// SAMPLES :[--------A--------]
 // SEGMENTS : ^
 //
 TEST_F(TextChunkerTest, SampleCrossesSegments) {
@@ -245,7 +292,7 @@ TEST_F(TextChunkerTest, SampleCrossesSegments) {
   const int64_t kSegment0Start = 0;
   const int64_t kSegment1Start = 100;
 
-  const int64_t kSampleAStart = 50;
+  const int64_t kSampleAStart = 0;
   const int64_t kSampleAEnd = 150;
 
   Init(kSegmentDurationSec);
@@ -293,9 +340,9 @@ TEST_F(TextChunkerTest, SampleCrossesSegments) {
 // TIME (ms):0 5 1 1 2 2 3
 // 0 0 5 0 5 0
 // 0 0 0 0 0
-// SAMPLES : [-----A-----]
-// [-----B-----]
-// [-----------C-----------]
+// SAMPLES :[--------A--------]
+// [--------B--------]
+// [-----------------C-----------]
 // SEGMENTS : ^ ^ ^
 //
 TEST_F(TextChunkerTest, PreservesOrder) {
@@ -306,13 +353,13 @@ TEST_F(TextChunkerTest, PreservesOrder) {
   const int64_t kSegment1Start = 100;
   const int64_t kSegment2Start = 200;
 
-  const int64_t kSampleAStart = 50;
+  const int64_t kSampleAStart = 0;
   const int64_t kSampleAEnd = 150;
 
-  const int64_t kSampleBStart = 50;
+  const int64_t kSampleBStart = 0;
   const int64_t kSampleBEnd = 150;
 
-  const int64_t kSampleCStart = 50;
+  const int64_t kSampleCStart = 0;
   const int64_t kSampleCEnd = 250;
 
   const char* kSampleAId = "sample 0";
@@ -391,7 +438,7 @@ TEST_F(TextChunkerTest, PreservesOrder) {
 // TIME (ms):0 5 1 1 2 2
 // 0 0 5 0 5
 // 0 0 0 0
-// SAMPLES : [-----------A-----------]
+// SAMPLES :[--------------A--------------]
 // [-----B------]
 // SEGMENTS : ^ ^ ^ ^ ^
 //
@@ -399,8 +446,8 @@ TEST_F(TextChunkerTest, NestedSamples) {
   const double kSegmentDurationSec = 0.05;
   const int64_t kSegmentDurationMs = 50;
 
-  const int64_t kSampleAStart = 25;
-  const int64_t kSampleAEnd = 225;
+  const int64_t kSampleAStart = 0;
+  const int64_t kSampleAEnd = 250;
 
   const int64_t kSampleBStart = 75;
   const int64_t kSampleBEnd = 175;
@@ -495,7 +542,7 @@ TEST_F(TextChunkerTest, NestedSamples) {
 // TIME (ms):0 5 1 1 2 2 3
 // 0 0 5 0 5 0
 // 0 0 0 0 0
-// SAMPLES : [--------A--------]
+// SAMPLES :[-----------A-----------]
 // [--B--]
 // SEGMENTS : ^ ^ ^
 //
@@ -507,7 +554,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
   const int64_t kSegment1Start = 100;
   const int64_t kSegment2Start = 200;
 
-  const int64_t kSampleAStart = 50;
+  const int64_t kSampleAStart = 0;
   const int64_t kSampleAEnd = 200;
 
   const int64_t kSampleBStart = 200;
@@ -571,7 +618,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
 // TIME (ms):0 5 1 1 2 2 3 3 4 5
 // 0 0 5 0 5 0 5 5 0
 // 0 0 0 0 0 0 0 0
-// SAMPLES : [-----------A-----------]
+// SAMPLES :[--------------A--------------]
 // CUES : ^ ^
 // SEGMENTS : ^ ^ ^
 //
@@ -579,7 +626,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
   const double kSegmentDurationSec = 0.3;
   const int64_t kSegmentDurationMs = 300;
 
-  const int64_t kSampleAStart = 50;
+  const int64_t kSampleAStart = 0;
   const int64_t kSampleAEnd = 250;
 
   const double kC0 = 0.1;