From 8f565bf38845ebdcd1a27e58c47d9b64a2ed1719 Mon Sep 17 00:00:00 2001
From: Aaron Vaage
Date: Mon, 26 Mar 2018 11:04:09 -0700
Subject: [PATCH] Change Text Sample to Use int64_t

Changed TextSample to use int64_t so that it uses the same type for time
as MediaSample.

Change-Id: I4cfbfdc60c37bb511517993976cd1a459bdf6667
---
 .../media/base/media_handler_test_base.cc     | 14 +++-
 packager/media/base/media_handler_test_base.h |  6 +-
 packager/media/base/text_sample.cc            |  6 +-
 packager/media/base/text_sample.h             | 12 ++--
 .../cue_alignment_handler_unittest.cc         | 64 +++++++++----------
 .../media/chunking/text_chunker_unittest.cc   | 41 ++++++------
 .../formats/webvtt/webvtt_to_mp4_handler.cc   | 20 +++---
 .../formats/webvtt/webvtt_to_mp4_handler.h    | 10 +--
 8 files changed, 91 insertions(+), 82 deletions(-)

diff --git a/packager/media/base/media_handler_test_base.cc b/packager/media/base/media_handler_test_base.cc
index 6791bc5294..1130a5df1d 100644
--- a/packager/media/base/media_handler_test_base.cc
+++ b/packager/media/base/media_handler_test_base.cc
@@ -34,6 +34,7 @@ const bool kEncrypted = true;
 
 // Use H264 code config.
 const uint8_t kCodecConfig[]{
+    // clang-format off
     // Header
     0x01, 0x64, 0x00, 0x1e, 0xff,
     // SPS count (ignore top three bits)
@@ -48,6 +49,7 @@ const uint8_t kCodecConfig[]{
     // PPS
     0x00, 0x06,  // Size
     0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
+    // clang-format on
 };
 
 // Mock data, we don't really care about what is inside.
@@ -221,8 +223,8 @@ std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetTextStreamInfo() const {
 
 std::unique_ptr<TextSample> MediaHandlerTestBase::GetTextSample(
     const std::string& id,
-    uint64_t start,
-    uint64_t end,
+    int64_t start,
+    int64_t end,
     const std::string& payload) const {
   std::unique_ptr<TextSample> sample(new TextSample);
   sample->set_id(id);
@@ -232,6 +234,14 @@ std::unique_ptr<TextSample> MediaHandlerTestBase::GetTextSample(
   return sample;
 }
 
+std::unique_ptr<CueEvent> MediaHandlerTestBase::GetCueEvent(
+    double time_in_seconds) const {
+  std::unique_ptr<CueEvent> event(new CueEvent);
+  event->time_in_seconds = time_in_seconds;
+
+  return event;
+}
+
 Status MediaHandlerTestBase::SetUpAndInitializeGraph(
     std::shared_ptr<MediaHandler> handler,
     size_t input_count,
diff --git a/packager/media/base/media_handler_test_base.h b/packager/media/base/media_handler_test_base.h
index cdbebe6ceb..d6f7def9e2 100644
--- a/packager/media/base/media_handler_test_base.h
+++ b/packager/media/base/media_handler_test_base.h
@@ -235,10 +235,12 @@ class MediaHandlerTestBase : public ::testing::Test {
   std::unique_ptr<StreamInfo> GetTextStreamInfo() const;
 
   std::unique_ptr<TextSample> GetTextSample(const std::string& id,
-                                            uint64_t start,
-                                            uint64_t end,
+                                            int64_t start,
+                                            int64_t end,
                                             const std::string& payload) const;
 
+  std::unique_ptr<CueEvent> GetCueEvent(double time_in_seconds) const;
+
   // Connect and initialize all handlers.
   Status SetUpAndInitializeGraph(std::shared_ptr<MediaHandler> handler,
                                  size_t input_count,
diff --git a/packager/media/base/text_sample.cc b/packager/media/base/text_sample.cc
index 871126b497..3376636c0a 100644
--- a/packager/media/base/text_sample.cc
+++ b/packager/media/base/text_sample.cc
@@ -11,11 +11,13 @@
 namespace shaka {
 namespace media {
 
-uint64_t TextSample::EndTime() const {
+int64_t TextSample::EndTime() const {
   return start_time_ + duration_;
 }
 
-void TextSample::SetTime(uint64_t start_time, uint64_t end_time) {
+void TextSample::SetTime(int64_t start_time, int64_t end_time) {
+  DCHECK_GE(start_time, 0);
+  DCHECK_GT(end_time, 0);
   DCHECK_LT(start_time, end_time);
   start_time_ = start_time;
   duration_ = end_time - start_time;
diff --git a/packager/media/base/text_sample.h b/packager/media/base/text_sample.h
index 691f3e5476..1005590e1b 100644
--- a/packager/media/base/text_sample.h
+++ b/packager/media/base/text_sample.h
@@ -19,14 +19,14 @@ class TextSample {
   TextSample() = default;
 
   const std::string& id() const { return id_; }
-  uint64_t start_time() const { return start_time_; }
-  uint64_t duration() const { return duration_; }
+  int64_t start_time() const { return start_time_; }
+  int64_t duration() const { return duration_; }
   const std::string& settings() const { return settings_; }
   const std::string& payload() const { return payload_; }
-  uint64_t EndTime() const;
+  int64_t EndTime() const;
 
   void set_id(const std::string& id) { id_ = id; }
-  void SetTime(uint64_t start_time, uint64_t end_time);
+  void SetTime(int64_t start_time, int64_t end_time);
 
   void AppendStyle(const std::string& style);
   void AppendPayload(const std::string& payload);
@@ -36,8 +36,8 @@
   // impact is minimal.
   std::string id_;
 
-  uint64_t start_time_ = 0;
-  uint64_t duration_ = 0;
+  int64_t start_time_ = 0;
+  int64_t duration_ = 0;
   std::string settings_;
   std::string payload_;
 };
diff --git a/packager/media/chunking/cue_alignment_handler_unittest.cc b/packager/media/chunking/cue_alignment_handler_unittest.cc
index 1dcece2da1..129a43f866 100644
--- a/packager/media/chunking/cue_alignment_handler_unittest.cc
+++ b/packager/media/chunking/cue_alignment_handler_unittest.cc
@@ -139,12 +139,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithNoCues) {
 
   const int64_t kSampleDuration = 1000;
 
-  const uint64_t kSample0Start = 0;
-  const uint64_t kSample0End = kSample0Start + kSampleDuration;
-  const uint64_t kSample1Start = kSample0End;
-  const uint64_t kSample1End = kSample1Start + kSampleDuration;
-  const uint64_t kSample2Start = kSample1End;
-  const uint64_t kSample2End = kSample2Start + kSampleDuration;
+  const int64_t kSample0Start = 0;
+  const int64_t kSample0End = kSample0Start + kSampleDuration;
+  const int64_t kSample1Start = kSample0End;
+  const int64_t kSample1End = kSample1Start + kSampleDuration;
+  const int64_t kSample2Start = kSample1End;
+  const int64_t kSample2End = kSample2Start + kSampleDuration;
 
   AdCueGeneratorParams params;
   SyncPointQueue sync_points(params);
@@ -191,14 +191,14 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithNoCues) {
   const size_t kAudioStream = 1;
   const size_t kVideoStream = 2;
 
-  const uint64_t kSampleDuration = 1000;
+  const int64_t kSampleDuration = 1000;
 
-  const uint64_t kSample0Start = 0;
-  const uint64_t kSample0End = kSample0Start + kSampleDuration;
-  const uint64_t kSample1Start = kSample0Start + kSampleDuration;
-  const uint64_t kSample1End = kSample1Start + kSampleDuration;
-  const uint64_t kSample2Start = kSample1Start + kSampleDuration;
-  const uint64_t kSample2End = kSample2Start + kSampleDuration;
+  const int64_t kSample0Start = 0;
+  const int64_t kSample0End = kSample0Start + kSampleDuration;
+  const int64_t kSample1Start = kSample0Start + kSampleDuration;
+  const int64_t kSample1End = kSample1Start + kSampleDuration;
+  const int64_t kSample2Start = kSample1Start + kSampleDuration;
+  const int64_t kSample2End = kSample2Start + kSampleDuration;
 
   AdCueGeneratorParams params;
   SyncPointQueue sync_points(params);
@@ -420,12 +420,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCues) {
 
   const int64_t kSampleDuration = 1000;
 
-  const uint64_t kSample0Start = 0;
-  const uint64_t kSample0End = kSample0Start + kSampleDuration;
-  const uint64_t kSample1Start = kSample0End;
-  const uint64_t kSample1End = kSample1Start + kSampleDuration;
-  const uint64_t kSample2Start = kSample1End;
-  const uint64_t kSample2End = kSample2Start + kSampleDuration;
+  const int64_t kSample0Start = 0;
+  const int64_t kSample0End = kSample0Start + kSampleDuration;
+  const int64_t kSample1Start = kSample0End;
+  const int64_t kSample1End = kSample1Start + kSampleDuration;
+  const int64_t kSample2Start = kSample1End;
+  const int64_t kSample2End = kSample2Start + kSampleDuration;
 
   const double kSample1StartInSeconds =
       static_cast<double>(kSample1Start) / kMsTimeScale;
@@ -485,15 +485,11 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
   const int64_t kSampleDuration = 1000;
 
   const int64_t kSample0Start = 0;
-  const int64_t kSample1Start = kSample0Start + kSampleDuration;
-  const int64_t kSample2Start = kSample1Start + kSampleDuration;
-
-  const uint64_t kSample0StartU = 0;
-  const uint64_t kSample0EndU = kSample0StartU + kSampleDuration;
-  const uint64_t kSample1StartU = kSample0EndU;
-  const uint64_t kSample1EndU = kSample1StartU + kSampleDuration;
-  const uint64_t kSample2StartU = kSample1EndU;
-  const uint64_t kSample2EndU = kSample2StartU + kSampleDuration;
+  const int64_t kSample0End = kSample0Start + kSampleDuration;
+  const int64_t kSample1Start = kSample0End;
+  const int64_t kSample1End = kSample1Start + kSampleDuration;
+  const int64_t kSample2Start = kSample1End;
+  const int64_t kSample2End = kSample2Start + kSampleDuration;
 
   const double kSample2StartInSeconds =
       static_cast<double>(kSample2Start) / kMsTimeScale;
@@ -516,15 +512,15 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
     EXPECT_CALL(*Output(kTextStream),
                 OnProcess(IsStreamInfo(kParent, kNoTimeScale, !kEncrypted)));
     EXPECT_CALL(*Output(kTextStream),
-                OnProcess(IsTextSample(kNoId, kSample0StartU, kSample0EndU,
+                OnProcess(IsTextSample(kNoId, kSample0Start, kSample0End,
                                        kNoSettings, kNoPayload)));
     EXPECT_CALL(*Output(kTextStream),
-                OnProcess(IsTextSample(kNoId, kSample1StartU, kSample1EndU,
+                OnProcess(IsTextSample(kNoId, kSample1Start, kSample1End,
                                        kNoSettings, kNoPayload)));
     EXPECT_CALL(*Output(kTextStream),
                 OnProcess(IsCueEvent(kParent, kSample2StartInSeconds)));
     EXPECT_CALL(*Output(kTextStream),
-                OnProcess(IsTextSample(kNoId, kSample2StartU, kSample2EndU,
+                OnProcess(IsTextSample(kNoId, kSample2Start, kSample2End,
                                        kNoSettings, kNoPayload)));
     EXPECT_CALL(*Output(kTextStream), OnFlush(kParent));
   }
@@ -572,15 +568,15 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
   Input(kTextStream)
       ->Dispatch(StreamData::FromTextSample(
           kChild,
-          GetTextSample(kNoId, kSample0StartU, kSample0EndU, kNoPayload)));
+          GetTextSample(kNoId, kSample0Start, kSample0End, kNoPayload)));
   Input(kTextStream)
       ->Dispatch(StreamData::FromTextSample(
          kChild,
-          GetTextSample(kNoId, kSample1StartU, kSample1EndU, kNoPayload)));
+          GetTextSample(kNoId, kSample1Start, kSample1End, kNoPayload)));
   Input(kTextStream)
       ->Dispatch(StreamData::FromTextSample(
          kChild,
-          GetTextSample(kNoId, kSample2StartU, kSample2EndU, kNoPayload)));
+          GetTextSample(kNoId, kSample2Start, kSample2End, kNoPayload)));
   Input(kTextStream)->FlushAllDownstreams();
 
   Input(kAudioStream)
diff --git a/packager/media/chunking/text_chunker_unittest.cc b/packager/media/chunking/text_chunker_unittest.cc
index 3427e4556b..1ec6f9ca42 100644
--- a/packager/media/chunking/text_chunker_unittest.cc
+++ b/packager/media/chunking/text_chunker_unittest.cc
@@ -15,8 +15,7 @@
 namespace shaka {
 namespace media {
 namespace {
-const int64_t kStartTimeSigned = 0;
-const uint64_t kStartTime = 0;
+const int64_t kStartTime = 0;
 const int64_t kSegmentDuration = 10000;  // 10 seconds
 
 const size_t kStreamIndex = 0;
@@ -50,7 +49,7 @@ class TextChunkerTest : public MediaHandlerTestBase {
 // |[---A---]|
 // |         |
 TEST_F(TextChunkerTest, CueEndingOnSegmentStart) {
-  const uint64_t kSampleDuration = kSegmentDuration;
+  const int64_t kSampleDuration = kSegmentDuration;
 
   {
     testing::InSequence s;
@@ -64,8 +63,8 @@ TEST_F(TextChunkerTest, CueEndingOnSegmentStart) {
                                        kNoSettings, kPayload[0])));
     EXPECT_CALL(
         *Output(kOutputIndex),
-        OnProcess(IsSegmentInfo(kStreamIndex, kStartTimeSigned,
-                                kSegmentDuration, !kSubSegment, !kEncrypted)));
+        OnProcess(IsSegmentInfo(kStreamIndex, kStartTime, kSegmentDuration,
+                                !kSubSegment, !kEncrypted)));
     EXPECT_CALL(*Output(kOutputIndex), OnFlush(kStreamIndex));
   }
 
@@ -89,7 +88,7 @@ TEST_F(TextChunkerTest, CueEndingOnSegmentStart) {
 TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
   // Divide segment duration by 2 so that the sample duration won't be a full
   // segment.
-  const uint64_t kSampleDuration = kSegmentDuration / 2;
+  const int64_t kSampleDuration = kSegmentDuration / 2;
 
   {
     testing::InSequence s;
@@ -103,8 +102,8 @@ TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
                                        kNoSettings, kPayload[0])));
     EXPECT_CALL(
         *Output(kOutputIndex),
-        OnProcess(IsSegmentInfo(kStreamIndex, kStartTimeSigned,
-                                kSegmentDuration, !kSubSegment, !kEncrypted)));
+        OnProcess(IsSegmentInfo(kStreamIndex, kStartTime, kSegmentDuration,
+                                !kSubSegment, !kEncrypted)));
 
     // Segment Two
     EXPECT_CALL(
         *Output(kOutputIndex),
         OnProcess(IsTextSample(kId[1], kStartTime + kSegmentDuration,
                                kStartTime + kSegmentDuration + kSampleDuration,
                                kNoSettings, kPayload[1])));
-    EXPECT_CALL(*Output(kOutputIndex),
-                OnProcess(IsSegmentInfo(
-                    kStreamIndex, kStartTimeSigned + kSegmentDuration,
-                    kSegmentDuration, !kSubSegment, !kEncrypted)));
+    EXPECT_CALL(
+        *Output(kOutputIndex),
+        OnProcess(IsSegmentInfo(kStreamIndex, kStartTime + kSegmentDuration,
+                                kSegmentDuration, !kSubSegment, !kEncrypted)));
     EXPECT_CALL(*Output(kOutputIndex), OnFlush(kStreamIndex));
   }
 
@@ -143,7 +142,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
 // |         | [---B---]
 // |         |
 TEST_F(TextChunkerTest, OutputsEmptySegments) {
-  const uint64_t kSampleDuration = kSegmentDuration / 2;
+  const int64_t kSampleDuration = kSegmentDuration / 2;
 
   const int64_t kSegment1Start = kStartTime;
   const int64_t kSegment2Start = kSegment1Start + kSegmentDuration;
@@ -208,7 +207,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
 // [-----A-----|---------]
 //             |
 TEST_F(TextChunkerTest, CueCrossesSegments) {
-  const uint64_t kSampleDuration = 2 * kSegmentDuration;
+  const int64_t kSampleDuration = 2 * kSegmentDuration;
 
   {
     testing::InSequence s;
@@ -222,18 +221,18 @@ TEST_F(TextChunkerTest, CueCrossesSegments) {
                                        kNoSettings, kPayload[0])));
     EXPECT_CALL(
         *Output(kOutputIndex),
-        OnProcess(IsSegmentInfo(kStreamIndex, kStartTimeSigned,
-                                kSegmentDuration, !kSubSegment, !kEncrypted)));
+        OnProcess(IsSegmentInfo(kStreamIndex, kStartTime, kSegmentDuration,
+                                !kSubSegment, !kEncrypted)));
 
     // Segment Two
     EXPECT_CALL(
         *Output(kOutputIndex),
         OnProcess(IsTextSample(kId[0], kStartTime, kStartTime + kSampleDuration,
                                kNoSettings, kPayload[0])));
-    EXPECT_CALL(*Output(kOutputIndex),
-                OnProcess(IsSegmentInfo(
-                    kStreamIndex, kStartTimeSigned + kSegmentDuration,
-                    kSegmentDuration, !kSubSegment, !kEncrypted)));
+    EXPECT_CALL(
+        *Output(kOutputIndex),
+        OnProcess(IsSegmentInfo(kStreamIndex, kStartTime + kSegmentDuration,
+                                kSegmentDuration, !kSubSegment, !kEncrypted)));
     EXPECT_CALL(*Output(kOutputIndex), OnFlush(kStreamIndex));
   }
 
@@ -258,7 +257,7 @@ TEST_F(TextChunkerOrderTest, PreservesOrder) {
   const size_t kInput = 0;
   const size_t kOutput = 0;
 
-  const uint64_t kDuration = 10000;
+  const int64_t kDuration = 10000;
 
   const int64_t kSegmentStart1 = 0;
   const int64_t kSegmentStart2 = kDuration;
diff --git a/packager/media/formats/webvtt/webvtt_to_mp4_handler.cc b/packager/media/formats/webvtt/webvtt_to_mp4_handler.cc
index 3d0297bcad..af37b7ec3d 100644
--- a/packager/media/formats/webvtt/webvtt_to_mp4_handler.cc
+++ b/packager/media/formats/webvtt/webvtt_to_mp4_handler.cc
@@ -17,16 +17,16 @@ namespace media {
 class DisplayAction {
  public:
-  DisplayAction(uint64_t id, uint64_t time) : id_(id), time_(time) {}
+  DisplayAction(uint64_t id, int64_t time) : id_(id), time_(time) {}
   virtual ~DisplayAction() = default;
 
   uint64_t id() const { return id_; }
-  uint64_t time() const { return time_; }
+  int64_t time() const { return time_; }
 
   virtual void ActOn(std::list* display) const = 0;
 
  private:
   uint64_t id_;
-  uint64_t time_;
+  int64_t time_;
 };
 
 namespace {
@@ -92,7 +92,7 @@ Status WebVttToMp4Handler::Process(std::unique_ptr<StreamData> stream_data) {
 }
 
 Status WebVttToMp4Handler::OnFlushRequest(size_t input_stream_index) {
-  const uint64_t kEndOfTime = std::numeric_limits<uint64_t>::max();
+  const int64_t kEndOfTime = std::numeric_limits<int64_t>::max();
   ProcessUpToTime(kEndOfTime);
 
   return FlushDownstream(0);
@@ -121,13 +121,13 @@ void WebVttToMp4Handler::WriteCue(const std::string& id,
   box.Write(out);
 }
 
-Status WebVttToMp4Handler::ProcessUpToTime(uint64_t cutoff_time) {
+Status WebVttToMp4Handler::ProcessUpToTime(int64_t cutoff_time) {
   // We can only process as far as the last add as no new events will be
   // added that come before that time.
   while (actions_.size() && actions_.top()->time() < cutoff_time) {
     // STAGE 1: Write out the current state
     //   Get the time range for which the current active state is valid.
-    const uint64_t previous_change = next_change_;
+    const int64_t previous_change = next_change_;
     next_change_ = actions_.top()->time();
 
     if (next_change_ > previous_change) {
@@ -161,8 +161,8 @@ Status WebVttToMp4Handler::ProcessUpToTime(uint64_t cutoff_time) {
 
 Status WebVttToMp4Handler::MergeAndSendSamples(
     const std::list& samples,
-    uint64_t start_time,
-    uint64_t end_time) {
+    int64_t start_time,
+    int64_t end_time) {
   DCHECK_GT(end_time, start_time);
 
   box_writer_.Clear();
@@ -181,8 +181,8 @@ Status WebVttToMp4Handler::MergeAndSendSamples(
   return DispatchMediaSample(kTrackId, std::move(sample));
 }
 
-Status WebVttToMp4Handler::SendEmptySample(uint64_t start_time,
-                                           uint64_t end_time) {
+Status WebVttToMp4Handler::SendEmptySample(int64_t start_time,
+                                           int64_t end_time) {
   DCHECK_GT(end_time, start_time);
 
   box_writer_.Clear();
diff --git a/packager/media/formats/webvtt/webvtt_to_mp4_handler.h b/packager/media/formats/webvtt/webvtt_to_mp4_handler.h
index 366a16a0bc..25dd838f19 100644
--- a/packager/media/formats/webvtt/webvtt_to_mp4_handler.h
+++ b/packager/media/formats/webvtt/webvtt_to_mp4_handler.h
@@ -54,20 +54,20 @@ class WebVttToMp4Handler : public MediaHandler {
   // queue's time is less than |cutoff|. |cutoff| is needed as we can only
   // merge and send samples when we are sure no new samples will appear before
   // the next action.
-  Status ProcessUpToTime(uint64_t cutoff_time);
+  Status ProcessUpToTime(int64_t cutoff_time);
 
   // Merge together all TextSamples in |samples| into a single MP4 box and
   // pass the box downstream.
   Status MergeAndSendSamples(const std::list& samples,
-                             uint64_t start_time,
-                             uint64_t end_time);
+                             int64_t start_time,
+                             int64_t end_time);
 
-  Status SendEmptySample(uint64_t start_time, uint64_t end_time);
+  Status SendEmptySample(int64_t start_time, int64_t end_time);
 
   // Get a new id for the next action.
   uint64_t NextActionId();
 
-  uint64_t next_change_ = 0;
+  int64_t next_change_ = 0;
 
   // This is the current state of the box we are writing.
   BufferWriter box_writer_;
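
Illustrative usage (not part of the patch): a minimal sketch of driving the updated TextSample API with the new signed times. It assumes only the interface shown in text_sample.h above; the function name, cue id, and the 0/1000 values are invented for the example, and times are in whatever timescale units the surrounding stream uses.

    // Sketch only -- not part of the patch.
    #include <cstdint>
    #include <memory>

    #include "packager/media/base/text_sample.h"

    std::unique_ptr<shaka::media::TextSample> MakeExampleCue() {
      std::unique_ptr<shaka::media::TextSample> sample(
          new shaka::media::TextSample);
      sample->set_id("example-cue");
      // Start and end are plain int64_t now, matching MediaSample. SetTime()
      // DCHECKs that start >= 0, end > 0, and start < end.
      const int64_t start = 0;
      const int64_t end = 1000;
      sample->SetTime(start, end);
      sample->AppendPayload("Hello, world");
      return sample;
    }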