Add text fragment and setting types.

This sets the groundwork for more generic text cues by introducing more
generic objects for the settings and the body.  This also changes the
TextSample to be immutable and to accept its fields in the constructor
instead of using setters.

Change-Id: I76b09ce8e8471a49e6bf447e8c187f867728a4bf
This commit is contained in:
Jacob Trimble 2020-08-24 15:23:15 -07:00
parent 56908a83a7
commit b2220eb0c6
16 changed files with 194 additions and 220 deletions

View File

@ -266,12 +266,8 @@ std::unique_ptr<TextSample> MediaHandlerTestBase::GetTextSample(
int64_t start,
int64_t end,
const std::string& payload) const {
std::unique_ptr<TextSample> sample(new TextSample);
sample->set_id(id);
sample->SetTime(start, end);
sample->AppendPayload(payload);
return sample;
return std::unique_ptr<TextSample>{
new TextSample(id, start, end, {}, TextFragment{payload})};
}
std::unique_ptr<CueEvent> MediaHandlerTestBase::GetCueEvent(

View File

@ -208,14 +208,7 @@ MATCHER_P5(IsMediaSample,
"is_key_frame");
}
MATCHER_P6(IsTextSample,
stream_index,
id,
start_time,
end_time,
settings,
payload,
"") {
MATCHER_P4(IsTextSample, stream_index, id, start_time, end_time, "") {
if (!TryMatchStreamDataType(arg->stream_data_type,
StreamDataType::kTextSample, result_listener)) {
return false;
@ -224,9 +217,7 @@ MATCHER_P6(IsTextSample,
*result_listener << "which is (" << arg->stream_index << ", "
<< ToPrettyString(arg->text_sample->id()) << ", "
<< arg->text_sample->start_time() << ", "
<< arg->text_sample->EndTime() << ", "
<< ToPrettyString(arg->text_sample->settings()) << ", "
<< ToPrettyString(arg->text_sample->payload()) << ")";
<< arg->text_sample->EndTime() << ")";
return TryMatch(arg->stream_index, stream_index, result_listener,
"stream_index") &&
@ -234,11 +225,7 @@ MATCHER_P6(IsTextSample,
TryMatch(arg->text_sample->start_time(), start_time, result_listener,
"start_time") &&
TryMatch(arg->text_sample->EndTime(), end_time, result_listener,
"EndTime") &&
TryMatch(arg->text_sample->settings(), settings, result_listener,
"settings") &&
TryMatch(arg->text_sample->payload(), payload, result_listener,
"payload");
"EndTime");
}
MATCHER_P2(IsCueEvent, stream_index, time_in_seconds, "") {

View File

@ -11,31 +11,24 @@
namespace shaka {
namespace media {
bool TextFragment::is_empty() const {
return body.empty();
}
TextSample::TextSample(const std::string& id,
int64_t start_time,
int64_t end_time,
const TextSettings& settings,
const TextFragment& body)
: id_(id),
start_time_(start_time),
duration_(end_time - start_time),
settings_(settings),
body_(body) {}
int64_t TextSample::EndTime() const {
return start_time_ + duration_;
}
void TextSample::SetTime(int64_t start_time, int64_t end_time) {
DCHECK_GE(start_time, 0);
DCHECK_GT(end_time, 0);
DCHECK_LT(start_time, end_time);
start_time_ = start_time;
duration_ = end_time - start_time;
}
void TextSample::AppendStyle(const std::string& style) {
if (settings_.length()) {
settings_ += " ";
}
settings_ += style;
}
void TextSample::AppendPayload(const std::string& payload) {
if (payload_.length()) {
payload_ += "\n";
}
payload_ += payload;
}
} // namespace media
} // namespace shaka

View File

@ -14,32 +14,43 @@
namespace shaka {
namespace media {
struct TextSettings {
// TODO(modmaker): Convert to generic structure.
std::string settings;
};
struct TextFragment {
// TODO(modmaker): Fill with settings and sub-fragments.
std::string body;
bool is_empty() const;
};
class TextSample {
public:
TextSample() = default;
TextSample(const std::string& id,
int64_t start_time,
int64_t end_time,
const TextSettings& settings,
const TextFragment& body);
const std::string& id() const { return id_; }
int64_t start_time() const { return start_time_; }
int64_t duration() const { return duration_; }
const std::string& settings() const { return settings_; }
const std::string& payload() const { return payload_; }
const TextSettings& settings() const { return settings_; }
const TextFragment& body() const { return body_; }
int64_t EndTime() const;
void set_id(const std::string& id) { id_ = id; }
void SetTime(int64_t start_time, int64_t end_time);
void AppendStyle(const std::string& style);
void AppendPayload(const std::string& payload);
private:
// Allow the compiler generated copy constructor and assignment operator
// intentionally. Since the text data is typically small, the performance
// impact is minimal.
std::string id_;
int64_t start_time_ = 0;
int64_t duration_ = 0;
std::string settings_;
std::string payload_;
const std::string id_;
const int64_t start_time_ = 0;
const int64_t duration_ = 0;
const TextSettings settings_;
const TextFragment body_;
};
} // namespace media

View File

@ -204,15 +204,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithNoCues) {
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
EXPECT_CALL(*Output(kTextStream), OnFlush(_));
}
@ -247,15 +244,12 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithNoCues) {
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
EXPECT_CALL(*Output(kTextStream), OnFlush(_));
}
@ -440,17 +434,14 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCues) {
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsCueEvent(_, kSample1StartInSeconds)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
EXPECT_CALL(*Output(kTextStream), OnFlush(_));
}
@ -491,15 +482,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCueAfterLastStart) {
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
// Cue before the sample end is processed.
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsCueEvent(_, kCue1TimeInSeconds)));
@ -543,17 +531,14 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsCueEvent(_, kSample2StartInSeconds)));
EXPECT_CALL(
*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
EXPECT_CALL(*Output(kTextStream), OnFlush(_));
}

View File

@ -32,7 +32,6 @@ const bool kSubSegment = true;
const uint64_t kTimescaleMs = 1000;
const char* kNoId = "";
const char* kNoSettings = "";
const char* kNoPayload = "";
} // namespace
@ -70,13 +69,13 @@ TEST_F(TextChunkerTest, SegmentsStartAtFirstSample) {
testing::InSequence s;
EXPECT_CALL(*Output(kOutput), OnProcess(IsStreamInfo(_, _, _, _)));
EXPECT_CALL(*Output(kOutput), OnProcess(IsTextSample(_, _, kSampleAStart,
kSampleAEnd, _, _)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, _, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsSegmentInfo(_, kSegment0Start, kSegmentDurationMs, _, _)));
EXPECT_CALL(*Output(kOutput), OnProcess(IsTextSample(_, _, kSampleAStart,
kSampleAEnd, _, _)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, _, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsSegmentInfo(_, kSegment1Start, kSegmentDurationMs, _, _)));
@ -120,8 +119,7 @@ TEST_F(TextChunkerTest, SampleEndingOnSegmentStart) {
OnProcess(IsStreamInfo(kStreamIndex, _, _, _)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
@ -172,8 +170,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForSamples) {
// Segment One
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
@ -181,8 +178,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForSamples) {
// Segment Two
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
kSegmentDurationMs, !kSubSegment,
@ -238,8 +234,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
// Segment One
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
@ -253,8 +248,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
// Segment Three
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
kSegmentDurationMs, !kSubSegment,
@ -305,8 +299,7 @@ TEST_F(TextChunkerTest, SampleCrossesSegments) {
// Segment One
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
@ -314,8 +307,7 @@ TEST_F(TextChunkerTest, SampleCrossesSegments) {
// Segment Two
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
kSegmentDurationMs, !kSubSegment,
@ -375,39 +367,39 @@ TEST_F(TextChunkerTest, PreservesOrder) {
OnProcess(IsStreamInfo(kStreamIndex, _, _, _)));
// Segment One
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleAId, kSampleAStart,
kSampleAEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleBId, kSampleBStart,
kSampleBEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleCId, kSampleCStart,
kSampleCEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleAId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleBId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleCId, kSampleCStart, kSampleCEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
!kEncrypted)));
// Segment Two
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleAId, kSampleAStart,
kSampleAEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleBId, kSampleBStart,
kSampleBEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleCId, kSampleCStart,
kSampleCEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleAId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleBId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleCId, kSampleCStart, kSampleCEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
kSegmentDurationMs, !kSubSegment,
!kEncrypted)));
// Segment Three
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kSampleCId, kSampleCStart,
kSampleCEnd, kNoSettings, kNoPayload)));
EXPECT_CALL(
*Output(kOutput),
OnProcess(IsTextSample(_, kSampleCId, kSampleCStart, kSampleCEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
kSegmentDurationMs, !kSubSegment,
@ -468,8 +460,7 @@ TEST_F(TextChunkerTest, NestedSamples) {
// Segment 0
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
@ -477,11 +468,9 @@ TEST_F(TextChunkerTest, NestedSamples) {
// Segment 1
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
kSegmentDurationMs, !kSubSegment,
@ -489,11 +478,9 @@ TEST_F(TextChunkerTest, NestedSamples) {
// Segment 2
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
kSegmentDurationMs, !kSubSegment,
@ -501,11 +488,9 @@ TEST_F(TextChunkerTest, NestedSamples) {
// Segment 3
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment3Start,
kSegmentDurationMs, !kSubSegment,
@ -513,8 +498,7 @@ TEST_F(TextChunkerTest, NestedSamples) {
// Segment 4
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment4Start,
kSegmentDurationMs, !kSubSegment,
@ -570,8 +554,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
// Segment One
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegmentDurationMs, !kSubSegment,
@ -579,8 +562,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
// Segment Two
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
kSegmentDurationMs, !kSubSegment,
@ -588,8 +570,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
// Segment Three
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
kSegmentDurationMs, !kSubSegment,
@ -650,8 +631,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
// Segment 0 and Cue 0
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
kSegment0StartLength, !kSubSegment,
@ -660,8 +640,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
// Segment 1 and Cue 1
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
kSegment1StartLength, !kSubSegment,
@ -670,8 +649,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
// Segment 2
EXPECT_CALL(*Output(kOutput),
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
kNoSettings, kNoPayload)));
OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
EXPECT_CALL(*Output(kOutput),
OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
kSegmentDurationMs, !kSubSegment,

View File

@ -45,8 +45,10 @@ Status TextPadder::OnTextSample(std::unique_ptr<StreamData> data) {
// sample right away. If there will be one, create an empty sample that will
// fill in that gap.
if (sample.start_time() > max_end_time_ms_) {
std::shared_ptr<TextSample> filler = std::make_shared<TextSample>();
filler->SetTime(max_end_time_ms_, sample.start_time());
const std::string kNoId = "";
auto filler = std::make_shared<TextSample>(kNoId, max_end_time_ms_,
sample.start_time(),
TextSettings{}, TextFragment{});
RETURN_IF_ERROR(
MediaHandler::DispatchTextSample(kStreamIndex, std::move(filler)));
}

View File

@ -23,10 +23,10 @@
'webvtt_muxer.h',
'webvtt_parser.cc',
'webvtt_parser.h',
'webvtt_timestamp.cc',
'webvtt_timestamp.h',
'webvtt_to_mp4_handler.cc',
'webvtt_to_mp4_handler.h',
'webvtt_utils.cc',
'webvtt_utils.h',
],
'dependencies': [
'../../../base/base.gyp:base',
@ -42,7 +42,7 @@
'text_readers_unittest.cc',
'webvtt_muxer_unittest.cc',
'webvtt_parser_unittest.cc',
'webvtt_timestamp_unittest.cc',
'webvtt_utils_unittest.cc',
'webvtt_to_mp4_handler_unittest.cc',
],
'dependencies': [

View File

@ -8,7 +8,7 @@
#include "packager/base/strings/stringprintf.h"
#include "packager/media/base/text_sample.h"
#include "packager/media/formats/webvtt/webvtt_timestamp.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
namespace shaka {
namespace media {
@ -61,15 +61,14 @@ void WebVttFileBuffer::Append(const TextSample& sample) {
buffer_.append(MsToWebVttTimestamp(sample.start_time()));
buffer_.append(" --> ");
buffer_.append(MsToWebVttTimestamp(sample.EndTime()));
// Settings are optional
if (sample.settings().length()) {
const std::string settings = WebVttSettingsToString(sample.settings());
if (!settings.empty()) {
buffer_.append(" ");
buffer_.append(sample.settings());
buffer_.append(settings);
}
buffer_.append("\n"); // end of time & settings
buffer_.append(sample.payload());
buffer_.append(WebVttFragmentToString(sample.body()));
buffer_.append("\n"); // end of payload
buffer_.append("\n"); // end of sample
}

View File

@ -11,7 +11,7 @@
#include "packager/file/file.h"
#include "packager/file/file_closer.h"
#include "packager/media/base/muxer_util.h"
#include "packager/media/formats/webvtt/webvtt_timestamp.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
#include "packager/status_macros.h"
namespace shaka {
@ -75,14 +75,12 @@ Status WebVttMuxer::Finalize() {
Status WebVttMuxer::AddTextSample(size_t stream_id, const TextSample& sample) {
// Ignore sync samples.
if (sample.payload().empty()) {
if (sample.body().is_empty()) {
return Status::OK;
}
if (sample.id().find('\n') != std::string::npos ||
sample.settings().find('\n') != std::string::npos) {
return Status(error::MUXER_FAILURE,
"Text id/settings cannot contain newlines");
if (sample.id().find('\n') != std::string::npos) {
return Status(error::MUXER_FAILURE, "Text id cannot contain newlines");
}
last_cue_ms_ = sample.EndTime();

View File

@ -11,7 +11,7 @@
#include "packager/base/strings/string_util.h"
#include "packager/media/base/text_sample.h"
#include "packager/media/base/text_stream_info.h"
#include "packager/media/formats/webvtt/webvtt_timestamp.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
namespace shaka {
namespace media {
@ -235,20 +235,26 @@ bool WebVttParser::ParseCue(const std::string& id,
return true;
}
std::shared_ptr<TextSample> sample = std::make_shared<TextSample>();
sample->set_id(id);
sample->SetTime(start_time, end_time);
// The rest of time_and_style are the style tokens.
TextSettings settings;
for (size_t i = 3; i < time_and_style.size(); i++) {
sample->AppendStyle(time_and_style[i]);
if (!settings.settings.empty()) {
settings.settings += " ";
}
settings.settings += time_and_style[i];
}
// The rest of the block is the payload.
TextFragment body;
for (size_t i = 1; i < block_size; i++) {
sample->AppendPayload(block[i]);
if (i > 1) {
body.body += "\n";
}
body.body += block[i];
}
auto sample =
std::make_shared<TextSample>(id, start_time, end_time, settings, body);
return new_text_sample_cb_.Run(kStreamIndex, sample);
}
@ -274,5 +280,6 @@ void WebVttParser::DispatchTextStreamInfo() {
style_region_config_, kNoWidth, kNoHeight, kNoLanguage));
init_cb_.Run(streams);
}
} // namespace media
} // namespace shaka

View File

@ -178,8 +178,8 @@ TEST_F(WebVttParserTest, ParseOneCue) {
EXPECT_EQ(samples_[0]->id(), kNoId);
EXPECT_EQ(samples_[0]->start_time(), 60000u);
EXPECT_EQ(samples_[0]->duration(), 3540000u);
EXPECT_EQ(samples_[0]->settings(), kNoSettings);
EXPECT_EQ(samples_[0]->payload(), "subtitle");
EXPECT_EQ(samples_[0]->settings().settings, kNoSettings);
EXPECT_EQ(samples_[0]->body().body, "subtitle");
}
TEST_F(WebVttParserTest, ParseOneCueWithStyleAndRegion) {
@ -214,8 +214,7 @@ TEST_F(WebVttParserTest, ParseOneCueWithStyleAndRegion) {
EXPECT_EQ(samples_[0]->id(), kNoId);
EXPECT_EQ(samples_[0]->start_time(), 60000u);
EXPECT_EQ(samples_[0]->duration(), 3540000u);
EXPECT_EQ(samples_[0]->settings(), kNoSettings);
EXPECT_EQ(samples_[0]->payload(), "subtitle");
EXPECT_EQ(samples_[0]->body().body, "subtitle");
}
TEST_F(WebVttParserTest, ParseOneEmptyCue) {
@ -232,7 +231,7 @@ TEST_F(WebVttParserTest, ParseOneEmptyCue) {
ASSERT_EQ(streams_.size(), 1u);
ASSERT_EQ(samples_.size(), 1u);
EXPECT_EQ(samples_[0]->payload(), "");
EXPECT_EQ(samples_[0]->body().body, "");
}
TEST_F(WebVttParserTest, FailToParseCueWithArrowInId) {
@ -265,7 +264,7 @@ TEST_F(WebVttParserTest, ParseOneCueWithId) {
ASSERT_EQ(streams_.size(), 1u);
ASSERT_EQ(samples_.size(), 1u);
EXPECT_EQ(samples_[0]->id(), "id");
EXPECT_EQ(samples_[0]->payload(), "subtitle");
EXPECT_EQ(samples_[0]->body().body, "subtitle");
}
TEST_F(WebVttParserTest, ParseOneEmptyCueWithId) {
@ -284,7 +283,7 @@ TEST_F(WebVttParserTest, ParseOneEmptyCueWithId) {
ASSERT_EQ(streams_.size(), 1u);
ASSERT_EQ(samples_.size(), 1u);
EXPECT_EQ(samples_[0]->id(), "id");
EXPECT_EQ(samples_[0]->payload(), "");
EXPECT_EQ(samples_[0]->body().body, "");
}
TEST_F(WebVttParserTest, ParseOneCueWithSettings) {
@ -301,7 +300,7 @@ TEST_F(WebVttParserTest, ParseOneCueWithSettings) {
ASSERT_EQ(streams_.size(), 1u);
ASSERT_EQ(samples_.size(), 1u);
EXPECT_EQ(samples_[0]->settings(), "size:50%");
EXPECT_EQ(samples_[0]->settings().settings, "size:50%");
}
// Verify that a typical case with multiple cues works.
@ -328,13 +327,13 @@ TEST_F(WebVttParserTest, ParseMultipleCues) {
EXPECT_EQ(samples_[0]->start_time(), 1000u);
EXPECT_EQ(samples_[0]->duration(), 4200u);
EXPECT_EQ(samples_[0]->payload(), "subtitle A");
EXPECT_EQ(samples_[0]->body().body, "subtitle A");
EXPECT_EQ(samples_[1]->start_time(), 2321u);
EXPECT_EQ(samples_[1]->duration(), 4679u);
EXPECT_EQ(samples_[1]->payload(), "subtitle B");
EXPECT_EQ(samples_[1]->body().body, "subtitle B");
EXPECT_EQ(samples_[2]->start_time(), 5800u);
EXPECT_EQ(samples_[2]->duration(), 2200u);
EXPECT_EQ(samples_[2]->payload(), "subtitle C");
EXPECT_EQ(samples_[2]->body().body, "subtitle C");
}
// Verify that a typical case with multiple cues works even when comments are
@ -370,9 +369,10 @@ TEST_F(WebVttParserTest, ParseWithComments) {
ASSERT_EQ(streams_.size(), 1u);
ASSERT_EQ(samples_.size(), 3u);
EXPECT_EQ(samples_[0]->payload(), "subtitle A");
EXPECT_EQ(samples_[1]->payload(), "subtitle B");
EXPECT_EQ(samples_[2]->payload(), "subtitle C");
EXPECT_EQ(samples_[0]->body().body, "subtitle A");
EXPECT_EQ(samples_[1]->body().body, "subtitle B");
EXPECT_EQ(samples_[2]->body().body, "subtitle C");
}
} // namespace media
} // namespace shaka

View File

@ -12,6 +12,7 @@
#include "packager/media/base/buffer_writer.h"
#include "packager/media/formats/mp4/box_buffer.h"
#include "packager/media/formats/mp4/box_definitions.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
#include "packager/status_macros.h"
namespace shaka {
@ -58,12 +59,8 @@ void WriteSample(const TextSample& sample, BufferWriter* out) {
if (sample.id().length()) {
box.cue_id.cue_id = sample.id();
}
if (sample.settings().length()) {
box.cue_settings.settings = sample.settings();
}
if (sample.payload().length()) {
box.cue_payload.cue_text = sample.payload();
}
box.cue_settings.settings = WebVttSettingsToString(sample.settings());
box.cue_payload.cue_text = WebVttFragmentToString(sample.body());
// If there is internal timing, i.e. WebVTT cue timestamp, then
// cue_current_time should be populated
@ -173,7 +170,7 @@ Status WebVttToMp4Handler::OnTextSample(
// Ignore empty samples. This will create gaps, but we will handle that
// later.
if (sample->payload().empty()) {
if (sample->body().is_empty()) {
return Status::OK;
}

View File

@ -4,7 +4,7 @@
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#include "packager/media/formats/webvtt/webvtt_timestamp.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
#include <ctype.h>
#include <inttypes.h>
@ -15,6 +15,7 @@
namespace shaka {
namespace media {
namespace {
bool GetTotalMilliseconds(uint64_t hours,
@ -82,5 +83,14 @@ std::string MsToWebVttTimestamp(uint64_t ms) {
".%03" PRIu64,
only_hours, only_minutes, only_seconds, only_ms);
}
std::string WebVttSettingsToString(const TextSettings& settings) {
return settings.settings;
}
std::string WebVttFragmentToString(const TextFragment& fragment) {
return fragment.body;
}
} // namespace media
} // namespace shaka

View File

@ -4,17 +4,21 @@
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd
#ifndef PACKAGER_MEDIA_FORMATS_WEBVTT_TIMESTAMP_H_
#define PACKAGER_MEDIA_FORMATS_WEBVTT_TIMESTAMP_H_
#ifndef PACKAGER_MEDIA_FORMATS_WEBVTT_UTILS_H_
#define PACKAGER_MEDIA_FORMATS_WEBVTT_UTILS_H_
#include <stdint.h>
#include <list>
#include <memory>
#include <string>
#include "packager/base/strings/string_piece.h"
#include "packager/media/base/text_sample.h"
namespace shaka {
namespace media {
// Parse a timestamp into milliseconds using the two patterns defined by WebVtt:
// LONG : ##:##:##.### (long can have 2 or more hour digits)
// SHORT : ##:##:###
@ -22,7 +26,14 @@ bool WebVttTimestampToMs(const base::StringPiece& source, uint64_t* out);
// Create a long form timestamp encoded as a string.
std::string MsToWebVttTimestamp(uint64_t ms);
/// Converts the given text settings to a WebVTT settings string.
std::string WebVttSettingsToString(const TextSettings& settings);
/// Converts the given TextFragment to a WebVTT cue body string.
std::string WebVttFragmentToString(const TextFragment& fragment);
} // namespace media
} // namespace shaka
#endif // PACKAGER_MEDIA_FORMATS_WEBVTT_TIMESTAMP_H_
#endif // PACKAGER_MEDIA_FORMATS_WEBVTT_UTILS_H_

View File

@ -6,7 +6,7 @@
#include <gtest/gtest.h>
#include "packager/media/formats/webvtt/webvtt_timestamp.h"
#include "packager/media/formats/webvtt/webvtt_utils.h"
namespace shaka {
namespace media {