Add text fragment and setting types.
This lays the groundwork for more generic text cues by introducing more generic objects for the cue settings and the cue body. It also makes TextSample immutable: the fields are now passed to the constructor instead of being set through setters.

Change-Id: I76b09ce8e8471a49e6bf447e8c187f867728a4bf
parent 56908a83a7
commit b2220eb0c6
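Before the diff, a minimal usage sketch (not part of this commit) of the new immutable TextSample, assuming only the TextSettings and TextFragment types introduced in text_sample.h; the cue id, times, and strings are made-up values.

    #include <memory>
    #include "packager/media/base/text_sample.h"

    using shaka::media::TextFragment;
    using shaka::media::TextSample;
    using shaka::media::TextSettings;

    // Build the settings and the body first, then construct the sample once.
    TextSettings settings;
    settings.settings = "align:center size:50%";  // still a raw WebVTT settings string for now

    TextFragment body;
    body.body = "Hello, world";                   // cue payload text

    // id, start time, end time (milliseconds in these tests), settings, body.
    auto sample = std::make_shared<TextSample>("cue-1", 0, 2000, settings, body);
    // There are no setters; everything is fixed at construction.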
@@ -266,12 +266,8 @@ std::unique_ptr<TextSample> MediaHandlerTestBase::GetTextSample(
     int64_t start,
     int64_t end,
     const std::string& payload) const {
-  std::unique_ptr<TextSample> sample(new TextSample);
-  sample->set_id(id);
-  sample->SetTime(start, end);
-  sample->AppendPayload(payload);
-
-  return sample;
+  return std::unique_ptr<TextSample>{
+      new TextSample(id, start, end, {}, TextFragment{payload})};
 }
 
 std::unique_ptr<CueEvent> MediaHandlerTestBase::GetCueEvent(
@@ -208,14 +208,7 @@ MATCHER_P5(IsMediaSample,
            "is_key_frame");
 }
 
-MATCHER_P6(IsTextSample,
-           stream_index,
-           id,
-           start_time,
-           end_time,
-           settings,
-           payload,
-           "") {
+MATCHER_P4(IsTextSample, stream_index, id, start_time, end_time, "") {
   if (!TryMatchStreamDataType(arg->stream_data_type,
                               StreamDataType::kTextSample, result_listener)) {
     return false;
@@ -224,9 +217,7 @@ MATCHER_P6(IsTextSample,
   *result_listener << "which is (" << arg->stream_index << ", "
                    << ToPrettyString(arg->text_sample->id()) << ", "
                    << arg->text_sample->start_time() << ", "
-                   << arg->text_sample->EndTime() << ", "
-                   << ToPrettyString(arg->text_sample->settings()) << ", "
-                   << ToPrettyString(arg->text_sample->payload()) << ")";
+                   << arg->text_sample->EndTime() << ")";
 
   return TryMatch(arg->stream_index, stream_index, result_listener,
                   "stream_index") &&
@@ -234,11 +225,7 @@ MATCHER_P6(IsTextSample,
          TryMatch(arg->text_sample->start_time(), start_time, result_listener,
                   "start_time") &&
          TryMatch(arg->text_sample->EndTime(), end_time, result_listener,
-                  "EndTime") &&
-         TryMatch(arg->text_sample->settings(), settings, result_listener,
-                  "settings") &&
-         TryMatch(arg->text_sample->payload(), payload, result_listener,
-                  "payload");
+                  "EndTime");
 }
 
 MATCHER_P2(IsCueEvent, stream_index, time_in_seconds, "") {
@@ -11,31 +11,24 @@
 namespace shaka {
 namespace media {
 
+bool TextFragment::is_empty() const {
+  return body.empty();
+}
+
+TextSample::TextSample(const std::string& id,
+                       int64_t start_time,
+                       int64_t end_time,
+                       const TextSettings& settings,
+                       const TextFragment& body)
+    : id_(id),
+      start_time_(start_time),
+      duration_(end_time - start_time),
+      settings_(settings),
+      body_(body) {}
+
 int64_t TextSample::EndTime() const {
   return start_time_ + duration_;
 }
 
-void TextSample::SetTime(int64_t start_time, int64_t end_time) {
-  DCHECK_GE(start_time, 0);
-  DCHECK_GT(end_time, 0);
-  DCHECK_LT(start_time, end_time);
-  start_time_ = start_time;
-  duration_ = end_time - start_time;
-}
-
-void TextSample::AppendStyle(const std::string& style) {
-  if (settings_.length()) {
-    settings_ += " ";
-  }
-  settings_ += style;
-}
-
-void TextSample::AppendPayload(const std::string& payload) {
-  if (payload_.length()) {
-    payload_ += "\n";
-  }
-  payload_ += payload;
-}
-
 } // namespace media
 } // namespace shaka
@@ -14,32 +14,43 @@
 namespace shaka {
 namespace media {
 
+struct TextSettings {
+  // TODO(modmaker): Convert to generic structure.
+  std::string settings;
+};
+
+struct TextFragment {
+  // TODO(modmaker): Fill with settings and sub-fragments.
+  std::string body;
+
+  bool is_empty() const;
+};
+
 class TextSample {
  public:
-  TextSample() = default;
+  TextSample(const std::string& id,
+             int64_t start_time,
+             int64_t end_time,
+             const TextSettings& settings,
+             const TextFragment& body);
 
   const std::string& id() const { return id_; }
   int64_t start_time() const { return start_time_; }
   int64_t duration() const { return duration_; }
-  const std::string& settings() const { return settings_; }
-  const std::string& payload() const { return payload_; }
+  const TextSettings& settings() const { return settings_; }
+  const TextFragment& body() const { return body_; }
   int64_t EndTime() const;
 
-  void set_id(const std::string& id) { id_ = id; }
-  void SetTime(int64_t start_time, int64_t end_time);
-  void AppendStyle(const std::string& style);
-  void AppendPayload(const std::string& payload);
-
 private:
   // Allow the compiler generated copy constructor and assignment operator
   // intentionally. Since the text data is typically small, the performance
   // impact is minimal.
 
-  std::string id_;
-  int64_t start_time_ = 0;
-  int64_t duration_ = 0;
-  std::string settings_;
-  std::string payload_;
+  const std::string id_;
+  const int64_t start_time_ = 0;
+  const int64_t duration_ = 0;
+  const TextSettings settings_;
+  const TextFragment body_;
 };
 
 } // namespace media
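A hedged sketch of how code that used the removed AppendStyle()/AppendPayload() setters migrates: accumulate into TextSettings and TextFragment first, then construct the sample. This mirrors the WebVttParser::ParseCue change further down; style_tokens, cue_lines, id, and the timing variables are hypothetical inputs.

    TextSettings settings;            // was built up via AppendStyle(token)
    for (const std::string& token : style_tokens) {
      if (!settings.settings.empty())
        settings.settings += " ";
      settings.settings += token;
    }

    TextFragment body;                // was built up via AppendPayload(line)
    for (const std::string& line : cue_lines) {
      if (!body.body.empty())
        body.body += "\n";
      body.body += line;
    }

    TextSample sample(id, start_time_ms, end_time_ms, settings, body);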
@@ -204,15 +204,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithNoCues) {
 
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
   EXPECT_CALL(*Output(kTextStream), OnFlush(_));
 }
 
@@ -247,15 +244,12 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithNoCues) {
 
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
   EXPECT_CALL(*Output(kTextStream), OnFlush(_));
 }
 
@@ -440,17 +434,14 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCues) {
 
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsCueEvent(_, kSample1StartInSeconds)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
   EXPECT_CALL(*Output(kTextStream), OnFlush(_));
 }
 
@@ -491,15 +482,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCueAfterLastStart) {
 
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
   // Cue before the sample end is processed.
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsCueEvent(_, kCue1TimeInSeconds)));
@@ -543,17 +531,14 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
 
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsStreamInfo(_, kMsTimeScale, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample0Start, kSample0End, _, _)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample1Start, kSample1End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample0Start, kSample0End)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample1Start, kSample1End)));
   EXPECT_CALL(*Output(kTextStream),
               OnProcess(IsCueEvent(_, kSample2StartInSeconds)));
-  EXPECT_CALL(
-      *Output(kTextStream),
-      OnProcess(IsTextSample(_, _, kSample2Start, kSample2End, _, _)));
+  EXPECT_CALL(*Output(kTextStream),
+              OnProcess(IsTextSample(_, _, kSample2Start, kSample2End)));
   EXPECT_CALL(*Output(kTextStream), OnFlush(_));
 }
 
@@ -32,7 +32,6 @@ const bool kSubSegment = true;
 const uint64_t kTimescaleMs = 1000;
 
 const char* kNoId = "";
-const char* kNoSettings = "";
 const char* kNoPayload = "";
 } // namespace
 
@@ -70,13 +69,13 @@ TEST_F(TextChunkerTest, SegmentsStartAtFirstSample) {
   testing::InSequence s;
 
   EXPECT_CALL(*Output(kOutput), OnProcess(IsStreamInfo(_, _, _, _)));
-  EXPECT_CALL(*Output(kOutput), OnProcess(IsTextSample(_, _, kSampleAStart,
-                                                       kSampleAEnd, _, _)));
+  EXPECT_CALL(*Output(kOutput),
+              OnProcess(IsTextSample(_, _, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(
       *Output(kOutput),
       OnProcess(IsSegmentInfo(_, kSegment0Start, kSegmentDurationMs, _, _)));
-  EXPECT_CALL(*Output(kOutput), OnProcess(IsTextSample(_, _, kSampleAStart,
-                                                       kSampleAEnd, _, _)));
+  EXPECT_CALL(*Output(kOutput),
+              OnProcess(IsTextSample(_, _, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(
       *Output(kOutput),
       OnProcess(IsSegmentInfo(_, kSegment1Start, kSegmentDurationMs, _, _)));
@@ -120,8 +119,7 @@ TEST_F(TextChunkerTest, SampleEndingOnSegmentStart) {
               OnProcess(IsStreamInfo(kStreamIndex, _, _, _)));
 
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -172,8 +170,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForSamples) {
 
   // Segment One
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -181,8 +178,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForSamples) {
 
   // Segment Two
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -238,8 +234,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
 
   // Segment One
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -253,8 +248,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
 
   // Segment Three
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -305,8 +299,7 @@ TEST_F(TextChunkerTest, SampleCrossesSegments) {
 
   // Segment One
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -314,8 +307,7 @@ TEST_F(TextChunkerTest, SampleCrossesSegments) {
 
   // Segment Two
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -375,39 +367,39 @@ TEST_F(TextChunkerTest, PreservesOrder) {
               OnProcess(IsStreamInfo(kStreamIndex, _, _, _)));
 
   // Segment One
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleAId, kSampleAStart,
-                                     kSampleAEnd, kNoSettings, kNoPayload)));
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleBId, kSampleBStart,
-                                     kSampleBEnd, kNoSettings, kNoPayload)));
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleCId, kSampleCStart,
-                                     kSampleCEnd, kNoSettings, kNoPayload)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleAId, kSampleAStart, kSampleAEnd)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleBId, kSampleBStart, kSampleBEnd)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleCId, kSampleCStart, kSampleCEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
                                       !kEncrypted)));
 
   // Segment Two
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleAId, kSampleAStart,
-                                     kSampleAEnd, kNoSettings, kNoPayload)));
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleBId, kSampleBStart,
-                                     kSampleBEnd, kNoSettings, kNoPayload)));
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleCId, kSampleCStart,
-                                     kSampleCEnd, kNoSettings, kNoPayload)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleAId, kSampleAStart, kSampleAEnd)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleBId, kSampleBStart, kSampleBEnd)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleCId, kSampleCStart, kSampleCEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
                                       kSegmentDurationMs, !kSubSegment,
                                       !kEncrypted)));
 
   // Segment Two
-  EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kSampleCId, kSampleCStart,
-                                     kSampleCEnd, kNoSettings, kNoPayload)));
+  EXPECT_CALL(
+      *Output(kOutput),
+      OnProcess(IsTextSample(_, kSampleCId, kSampleCStart, kSampleCEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -468,8 +460,7 @@ TEST_F(TextChunkerTest, NestedSamples) {
 
   // Segment 0
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -477,11 +468,9 @@ TEST_F(TextChunkerTest, NestedSamples) {
 
   // Segment 1
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -489,11 +478,9 @@ TEST_F(TextChunkerTest, NestedSamples) {
 
   // Segment 2
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -501,11 +488,9 @@ TEST_F(TextChunkerTest, NestedSamples) {
 
   // Segment 3
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment3Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -513,8 +498,7 @@ TEST_F(TextChunkerTest, NestedSamples) {
 
   // Segment 4
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment4Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -570,8 +554,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
 
   // Segment One
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -579,8 +562,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
 
   // Segment Two
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -588,8 +570,7 @@ TEST_F(TextChunkerTest, SecondSampleStartsAfterMultiSegmentSampleEnds) {
 
   // Segment Three
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleBStart, kSampleBEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -650,8 +631,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
 
   // Segment 0 and Cue 0
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment0Start,
                                       kSegment0StartLength, !kSubSegment,
@@ -660,8 +640,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
 
   // Segment 1 and Cue 1
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment1Start,
                                       kSegment1StartLength, !kSubSegment,
@@ -670,8 +649,7 @@ TEST_F(TextChunkerTest, SampleSpanningMultipleCues) {
 
   // Segment 2
   EXPECT_CALL(*Output(kOutput),
-              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd,
-                                     kNoSettings, kNoPayload)));
+              OnProcess(IsTextSample(_, kNoId, kSampleAStart, kSampleAEnd)));
   EXPECT_CALL(*Output(kOutput),
               OnProcess(IsSegmentInfo(kStreamIndex, kSegment2Start,
                                       kSegmentDurationMs, !kSubSegment,
@@ -45,8 +45,10 @@ Status TextPadder::OnTextSample(std::unique_ptr<StreamData> data) {
   // sample right away. If there will be one, create an empty sample that will
   // fill in that gap.
   if (sample.start_time() > max_end_time_ms_) {
-    std::shared_ptr<TextSample> filler = std::make_shared<TextSample>();
-    filler->SetTime(max_end_time_ms_, sample.start_time());
+    const std::string kNoId = "";
+    auto filler = std::make_shared<TextSample>(kNoId, max_end_time_ms_,
+                                               sample.start_time(),
+                                               TextSettings{}, TextFragment{});
     RETURN_IF_ERROR(
         MediaHandler::DispatchTextSample(kStreamIndex, std::move(filler)));
   }
 
@@ -23,10 +23,10 @@
         'webvtt_muxer.h',
         'webvtt_parser.cc',
         'webvtt_parser.h',
-        'webvtt_timestamp.cc',
-        'webvtt_timestamp.h',
         'webvtt_to_mp4_handler.cc',
         'webvtt_to_mp4_handler.h',
+        'webvtt_utils.cc',
+        'webvtt_utils.h',
       ],
       'dependencies': [
         '../../../base/base.gyp:base',
@@ -42,7 +42,7 @@
         'text_readers_unittest.cc',
         'webvtt_muxer_unittest.cc',
         'webvtt_parser_unittest.cc',
-        'webvtt_timestamp_unittest.cc',
+        'webvtt_utils_unittest.cc',
         'webvtt_to_mp4_handler_unittest.cc',
       ],
       'dependencies': [
@@ -8,7 +8,7 @@
 
 #include "packager/base/strings/stringprintf.h"
 #include "packager/media/base/text_sample.h"
-#include "packager/media/formats/webvtt/webvtt_timestamp.h"
+#include "packager/media/formats/webvtt/webvtt_utils.h"
 
 namespace shaka {
 namespace media {
@@ -61,15 +61,14 @@ void WebVttFileBuffer::Append(const TextSample& sample) {
   buffer_.append(MsToWebVttTimestamp(sample.start_time()));
   buffer_.append(" --> ");
   buffer_.append(MsToWebVttTimestamp(sample.EndTime()));
-  // Settings are optional
-  if (sample.settings().length()) {
+  const std::string settings = WebVttSettingsToString(sample.settings());
+  if (!settings.empty()) {
     buffer_.append(" ");
-    buffer_.append(sample.settings());
+    buffer_.append(settings);
   }
   buffer_.append("\n");  // end of time & settings
 
-  buffer_.append(sample.payload());
+  buffer_.append(WebVttFragmentToString(sample.body()));
   buffer_.append("\n");  // end of payload
   buffer_.append("\n");  // end of sample
 }
 
@@ -11,7 +11,7 @@
 #include "packager/file/file.h"
 #include "packager/file/file_closer.h"
 #include "packager/media/base/muxer_util.h"
-#include "packager/media/formats/webvtt/webvtt_timestamp.h"
+#include "packager/media/formats/webvtt/webvtt_utils.h"
 #include "packager/status_macros.h"
 
 namespace shaka {
@@ -75,14 +75,12 @@ Status WebVttMuxer::Finalize() {
 
 Status WebVttMuxer::AddTextSample(size_t stream_id, const TextSample& sample) {
   // Ignore sync samples.
-  if (sample.payload().empty()) {
+  if (sample.body().is_empty()) {
     return Status::OK;
   }
 
-  if (sample.id().find('\n') != std::string::npos ||
-      sample.settings().find('\n') != std::string::npos) {
-    return Status(error::MUXER_FAILURE,
-                  "Text id/settings cannot contain newlines");
+  if (sample.id().find('\n') != std::string::npos) {
+    return Status(error::MUXER_FAILURE, "Text id cannot contain newlines");
   }
 
   last_cue_ms_ = sample.EndTime();
 
@@ -11,7 +11,7 @@
 #include "packager/base/strings/string_util.h"
 #include "packager/media/base/text_sample.h"
 #include "packager/media/base/text_stream_info.h"
-#include "packager/media/formats/webvtt/webvtt_timestamp.h"
+#include "packager/media/formats/webvtt/webvtt_utils.h"
 
 namespace shaka {
 namespace media {
@@ -235,20 +235,26 @@ bool WebVttParser::ParseCue(const std::string& id,
     return true;
   }
 
-  std::shared_ptr<TextSample> sample = std::make_shared<TextSample>();
-  sample->set_id(id);
-  sample->SetTime(start_time, end_time);
-
   // The rest of time_and_style are the style tokens.
+  TextSettings settings;
   for (size_t i = 3; i < time_and_style.size(); i++) {
-    sample->AppendStyle(time_and_style[i]);
+    if (!settings.settings.empty()) {
+      settings.settings += " ";
+    }
+    settings.settings += time_and_style[i];
   }
 
   // The rest of the block is the payload.
+  TextFragment body;
   for (size_t i = 1; i < block_size; i++) {
-    sample->AppendPayload(block[i]);
+    if (i > 1) {
+      body.body += "\n";
+    }
+    body.body += block[i];
   }
 
+  auto sample =
+      std::make_shared<TextSample>(id, start_time, end_time, settings, body);
   return new_text_sample_cb_.Run(kStreamIndex, sample);
 }
 
@@ -274,5 +280,6 @@ void WebVttParser::DispatchTextStreamInfo() {
       style_region_config_, kNoWidth, kNoHeight, kNoLanguage));
   init_cb_.Run(streams);
 }
 
 } // namespace media
 } // namespace shaka
@@ -178,8 +178,8 @@ TEST_F(WebVttParserTest, ParseOneCue) {
   EXPECT_EQ(samples_[0]->id(), kNoId);
   EXPECT_EQ(samples_[0]->start_time(), 60000u);
   EXPECT_EQ(samples_[0]->duration(), 3540000u);
-  EXPECT_EQ(samples_[0]->settings(), kNoSettings);
-  EXPECT_EQ(samples_[0]->payload(), "subtitle");
+  EXPECT_EQ(samples_[0]->settings().settings, kNoSettings);
+  EXPECT_EQ(samples_[0]->body().body, "subtitle");
 }
 
 TEST_F(WebVttParserTest, ParseOneCueWithStyleAndRegion) {
@@ -214,8 +214,7 @@ TEST_F(WebVttParserTest, ParseOneCueWithStyleAndRegion) {
   EXPECT_EQ(samples_[0]->id(), kNoId);
   EXPECT_EQ(samples_[0]->start_time(), 60000u);
   EXPECT_EQ(samples_[0]->duration(), 3540000u);
-  EXPECT_EQ(samples_[0]->settings(), kNoSettings);
-  EXPECT_EQ(samples_[0]->payload(), "subtitle");
+  EXPECT_EQ(samples_[0]->body().body, "subtitle");
 }
 
 TEST_F(WebVttParserTest, ParseOneEmptyCue) {
@@ -232,7 +231,7 @@ TEST_F(WebVttParserTest, ParseOneEmptyCue) {
 
   ASSERT_EQ(streams_.size(), 1u);
   ASSERT_EQ(samples_.size(), 1u);
-  EXPECT_EQ(samples_[0]->payload(), "");
+  EXPECT_EQ(samples_[0]->body().body, "");
 }
 
 TEST_F(WebVttParserTest, FailToParseCueWithArrowInId) {
@@ -265,7 +264,7 @@ TEST_F(WebVttParserTest, ParseOneCueWithId) {
   ASSERT_EQ(streams_.size(), 1u);
   ASSERT_EQ(samples_.size(), 1u);
   EXPECT_EQ(samples_[0]->id(), "id");
-  EXPECT_EQ(samples_[0]->payload(), "subtitle");
+  EXPECT_EQ(samples_[0]->body().body, "subtitle");
 }
 
 TEST_F(WebVttParserTest, ParseOneEmptyCueWithId) {
@@ -284,7 +283,7 @@ TEST_F(WebVttParserTest, ParseOneEmptyCueWithId) {
   ASSERT_EQ(streams_.size(), 1u);
   ASSERT_EQ(samples_.size(), 1u);
   EXPECT_EQ(samples_[0]->id(), "id");
-  EXPECT_EQ(samples_[0]->payload(), "");
+  EXPECT_EQ(samples_[0]->body().body, "");
 }
 
 TEST_F(WebVttParserTest, ParseOneCueWithSettings) {
@@ -301,7 +300,7 @@ TEST_F(WebVttParserTest, ParseOneCueWithSettings) {
 
   ASSERT_EQ(streams_.size(), 1u);
   ASSERT_EQ(samples_.size(), 1u);
-  EXPECT_EQ(samples_[0]->settings(), "size:50%");
+  EXPECT_EQ(samples_[0]->settings().settings, "size:50%");
 }
 
 // Verify that a typical case with mulitple cues work.
@@ -328,13 +327,13 @@ TEST_F(WebVttParserTest, ParseMultipleCues) {
 
   EXPECT_EQ(samples_[0]->start_time(), 1000u);
   EXPECT_EQ(samples_[0]->duration(), 4200u);
-  EXPECT_EQ(samples_[0]->payload(), "subtitle A");
+  EXPECT_EQ(samples_[0]->body().body, "subtitle A");
   EXPECT_EQ(samples_[1]->start_time(), 2321u);
   EXPECT_EQ(samples_[1]->duration(), 4679u);
-  EXPECT_EQ(samples_[1]->payload(), "subtitle B");
+  EXPECT_EQ(samples_[1]->body().body, "subtitle B");
   EXPECT_EQ(samples_[2]->start_time(), 5800u);
   EXPECT_EQ(samples_[2]->duration(), 2200u);
-  EXPECT_EQ(samples_[2]->payload(), "subtitle C");
+  EXPECT_EQ(samples_[2]->body().body, "subtitle C");
 }
 
 // Verify that a typical case with mulitple cues work even when comments are
@@ -370,9 +369,10 @@ TEST_F(WebVttParserTest, ParseWithComments) {
   ASSERT_EQ(streams_.size(), 1u);
   ASSERT_EQ(samples_.size(), 3u);
 
-  EXPECT_EQ(samples_[0]->payload(), "subtitle A");
-  EXPECT_EQ(samples_[1]->payload(), "subtitle B");
-  EXPECT_EQ(samples_[2]->payload(), "subtitle C");
+  EXPECT_EQ(samples_[0]->body().body, "subtitle A");
+  EXPECT_EQ(samples_[1]->body().body, "subtitle B");
+  EXPECT_EQ(samples_[2]->body().body, "subtitle C");
 }
 
 } // namespace media
 } // namespace shaka
@@ -12,6 +12,7 @@
 #include "packager/media/base/buffer_writer.h"
 #include "packager/media/formats/mp4/box_buffer.h"
 #include "packager/media/formats/mp4/box_definitions.h"
+#include "packager/media/formats/webvtt/webvtt_utils.h"
 #include "packager/status_macros.h"
 
 namespace shaka {
@@ -58,12 +59,8 @@ void WriteSample(const TextSample& sample, BufferWriter* out) {
   if (sample.id().length()) {
     box.cue_id.cue_id = sample.id();
   }
-  if (sample.settings().length()) {
-    box.cue_settings.settings = sample.settings();
-  }
-  if (sample.payload().length()) {
-    box.cue_payload.cue_text = sample.payload();
-  }
+  box.cue_settings.settings = WebVttSettingsToString(sample.settings());
+  box.cue_payload.cue_text = WebVttFragmentToString(sample.body());
 
   // If there is internal timing, i.e. WebVTT cue timestamp, then
   // cue_current_time should be populated
@@ -173,7 +170,7 @@ Status WebVttToMp4Handler::OnTextSample(
 
   // Ignore empty samples. This will create gaps, but we will handle that
   // later.
-  if (sample->payload().empty()) {
+  if (sample->body().is_empty()) {
     return Status::OK;
   }
 
@@ -4,7 +4,7 @@
 // license that can be found in the LICENSE file or at
 // https://developers.google.com/open-source/licenses/bsd
 
-#include "packager/media/formats/webvtt/webvtt_timestamp.h"
+#include "packager/media/formats/webvtt/webvtt_utils.h"
 
 #include <ctype.h>
 #include <inttypes.h>
@@ -15,6 +15,7 @@
 
 namespace shaka {
 namespace media {
 
 namespace {
 
 bool GetTotalMilliseconds(uint64_t hours,
@@ -82,5 +83,14 @@ std::string MsToWebVttTimestamp(uint64_t ms) {
                             ".%03" PRIu64,
                             only_hours, only_minutes, only_seconds, only_ms);
 }
 
+std::string WebVttSettingsToString(const TextSettings& settings) {
+  return settings.settings;
+}
+
+std::string WebVttFragmentToString(const TextFragment& fragment) {
+  return fragment.body;
+}
+
 } // namespace media
 } // namespace shaka
@@ -4,17 +4,21 @@
 // license that can be found in the LICENSE file or at
 // https://developers.google.com/open-source/licenses/bsd
 
-#ifndef PACKAGER_MEDIA_FORMATS_WEBVTT_TIMESTAMP_H_
-#define PACKAGER_MEDIA_FORMATS_WEBVTT_TIMESTAMP_H_
+#ifndef PACKAGER_MEDIA_FORMATS_WEBVTT_UTILS_H_
+#define PACKAGER_MEDIA_FORMATS_WEBVTT_UTILS_H_
 
 #include <stdint.h>
 
+#include <list>
+#include <memory>
 #include <string>
 
 #include "packager/base/strings/string_piece.h"
+#include "packager/media/base/text_sample.h"
 
 namespace shaka {
 namespace media {
 
 // Parse a timestamp into milliseconds using the two patterns defined by WebVtt:
 // LONG  : ##:##:##.### (long can have 2 or more hour digits)
 // SHORT : ##:##:###
@@ -22,7 +26,14 @@ bool WebVttTimestampToMs(const base::StringPiece& source, uint64_t* out);
 
 // Create a long form timestamp encoded as a string.
 std::string MsToWebVttTimestamp(uint64_t ms);
 
+/// Converts the given text settings to a WebVTT settings string.
+std::string WebVttSettingsToString(const TextSettings& settings);
+
+/// Converts the given TextFragment to a WebVTT cue body string.
+std::string WebVttFragmentToString(const TextFragment& fragment);
+
 } // namespace media
 } // namespace shaka
 
-#endif  // PACKAGER_MEDIA_FORMATS_WEBVTT_TIMESTAMP_H_
+#endif  // PACKAGER_MEDIA_FORMATS_WEBVTT_UTILS_H_
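For reference, a short sketch of how the two new helpers are meant to be used together with the existing timestamp helper, mirroring the WebVttFileBuffer::Append change above; it assumes a TextSample named sample is already in scope.

    std::string cue;
    cue += MsToWebVttTimestamp(sample.start_time());
    cue += " --> ";
    cue += MsToWebVttTimestamp(sample.EndTime());
    const std::string settings = WebVttSettingsToString(sample.settings());
    if (!settings.empty()) {
      cue += " ";
      cue += settings;                             // cue settings are optional
    }
    cue += "\n";
    cue += WebVttFragmentToString(sample.body());  // cue payload
    cue += "\n\n";                                 // blank line ends the cue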
@@ -6,7 +6,7 @@
 
 #include <gtest/gtest.h>
 
-#include "packager/media/formats/webvtt/webvtt_timestamp.h"
+#include "packager/media/formats/webvtt/webvtt_utils.h"
 
 namespace shaka {
 namespace media {