Change Text Sample to Use int64_t

Changed TextSample to use int64_t so that it uses the same time type
as MediaSample.

Change-Id: I4cfbfdc60c37bb511517993976cd1a459bdf6667
Aaron Vaage 2018-03-26 11:04:09 -07:00
parent fac16cbf0a
commit 8f565bf388
8 changed files with 91 additions and 82 deletions
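MediaSample already carries its timestamps as signed 64-bit values, so keeping TextSample's times unsigned meant signed/unsigned mixing wherever text and media times were compared or subtracted. A minimal standalone sketch of the failure mode the switch avoids (illustration only, not part of this patch; the variable names are made up):

#include <cstdint>
#include <iostream>

int main() {
  // With an unsigned text time, subtracting a later (signed) media timestamp
  // wraps around to a huge positive value instead of going negative.
  uint64_t text_start_unsigned = 500;
  int64_t media_pts = 1000;
  std::cout << text_start_unsigned - media_pts << "\n";  // 18446744073709551116

  // With int64_t on both sides, the difference is simply -500.
  int64_t text_start_signed = 500;
  std::cout << text_start_signed - media_pts << "\n";  // -500
  return 0;
}

A side effect of the signed type is that the DCHECK_GE(start_time, 0) added in TextSample::SetTime below actually checks something: with uint64_t the comparison against 0 was always true, and a negative input would already have wrapped before reaching it.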

@@ -34,6 +34,7 @@ const bool kEncrypted = true;
// Use H264 code config.
const uint8_t kCodecConfig[]{
// clang-format off
// Header
0x01, 0x64, 0x00, 0x1e, 0xff,
// SPS count (ignore top three bits)
@@ -48,6 +49,7 @@ const uint8_t kCodecConfig[]{
// PPS
0x00, 0x06, // Size
0x68, 0xeb, 0xe3, 0xcb, 0x22, 0xc0,
// clang-format on
};
// Mock data, we don't really care about what is inside.
@@ -221,8 +223,8 @@ std::unique_ptr<StreamInfo> MediaHandlerTestBase::GetTextStreamInfo() const {
std::unique_ptr<TextSample> MediaHandlerTestBase::GetTextSample(
const std::string& id,
uint64_t start,
uint64_t end,
int64_t start,
int64_t end,
const std::string& payload) const {
std::unique_ptr<TextSample> sample(new TextSample);
sample->set_id(id);
@@ -232,6 +234,14 @@ std::unique_ptr<TextSample> MediaHandlerTestBase::GetTextSample(
return sample;
}
std::unique_ptr<CueEvent> MediaHandlerTestBase::GetCueEvent(
double time_in_seconds) const {
std::unique_ptr<CueEvent> event(new CueEvent);
event->time_in_seconds = time_in_seconds;
return event;
}
Status MediaHandlerTestBase::SetUpAndInitializeGraph(
std::shared_ptr<MediaHandler> handler,
size_t input_count,

@@ -235,10 +235,12 @@ class MediaHandlerTestBase : public ::testing::Test {
std::unique_ptr<StreamInfo> GetTextStreamInfo() const;
std::unique_ptr<TextSample> GetTextSample(const std::string& id,
uint64_t start,
uint64_t end,
int64_t start,
int64_t end,
const std::string& payload) const;
std::unique_ptr<CueEvent> GetCueEvent(double time_in_seconds) const;
// Connect and initialize all handlers.
Status SetUpAndInitializeGraph(std::shared_ptr<MediaHandler> handler,
size_t input_count,

@@ -11,11 +11,13 @@
namespace shaka {
namespace media {
uint64_t TextSample::EndTime() const {
int64_t TextSample::EndTime() const {
return start_time_ + duration_;
}
void TextSample::SetTime(uint64_t start_time, uint64_t end_time) {
void TextSample::SetTime(int64_t start_time, int64_t end_time) {
DCHECK_GE(start_time, 0);
DCHECK_GT(end_time, 0);
DCHECK_LT(start_time, end_time);
start_time_ = start_time;
duration_ = end_time - start_time;

@@ -19,14 +19,14 @@ class TextSample {
TextSample() = default;
const std::string& id() const { return id_; }
uint64_t start_time() const { return start_time_; }
uint64_t duration() const { return duration_; }
int64_t start_time() const { return start_time_; }
int64_t duration() const { return duration_; }
const std::string& settings() const { return settings_; }
const std::string& payload() const { return payload_; }
uint64_t EndTime() const;
int64_t EndTime() const;
void set_id(const std::string& id) { id_ = id; }
void SetTime(uint64_t start_time, uint64_t end_time);
void SetTime(int64_t start_time, int64_t end_time);
void AppendStyle(const std::string& style);
void AppendPayload(const std::string& payload);
@@ -36,8 +36,8 @@ class TextSample {
// impact is minimal.
std::string id_;
uint64_t start_time_ = 0;
uint64_t duration_ = 0;
int64_t start_time_ = 0;
int64_t duration_ = 0;
std::string settings_;
std::string payload_;
};

@@ -139,12 +139,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithNoCues) {
const int64_t kSampleDuration = 1000;
const uint64_t kSample0Start = 0;
const uint64_t kSample0End = kSample0Start + kSampleDuration;
const uint64_t kSample1Start = kSample0End;
const uint64_t kSample1End = kSample1Start + kSampleDuration;
const uint64_t kSample2Start = kSample1End;
const uint64_t kSample2End = kSample2Start + kSampleDuration;
const int64_t kSample0Start = 0;
const int64_t kSample0End = kSample0Start + kSampleDuration;
const int64_t kSample1Start = kSample0End;
const int64_t kSample1End = kSample1Start + kSampleDuration;
const int64_t kSample2Start = kSample1End;
const int64_t kSample2End = kSample2Start + kSampleDuration;
AdCueGeneratorParams params;
SyncPointQueue sync_points(params);
@@ -191,14 +191,14 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithNoCues) {
const size_t kAudioStream = 1;
const size_t kVideoStream = 2;
const uint64_t kSampleDuration = 1000;
const int64_t kSampleDuration = 1000;
const uint64_t kSample0Start = 0;
const uint64_t kSample0End = kSample0Start + kSampleDuration;
const uint64_t kSample1Start = kSample0Start + kSampleDuration;
const uint64_t kSample1End = kSample1Start + kSampleDuration;
const uint64_t kSample2Start = kSample1Start + kSampleDuration;
const uint64_t kSample2End = kSample2Start + kSampleDuration;
const int64_t kSample0Start = 0;
const int64_t kSample0End = kSample0Start + kSampleDuration;
const int64_t kSample1Start = kSample0Start + kSampleDuration;
const int64_t kSample1End = kSample1Start + kSampleDuration;
const int64_t kSample2Start = kSample1Start + kSampleDuration;
const int64_t kSample2End = kSample2Start + kSampleDuration;
AdCueGeneratorParams params;
SyncPointQueue sync_points(params);
@@ -420,12 +420,12 @@ TEST_F(CueAlignmentHandlerTest, TextInputWithCues) {
const int64_t kSampleDuration = 1000;
const uint64_t kSample0Start = 0;
const uint64_t kSample0End = kSample0Start + kSampleDuration;
const uint64_t kSample1Start = kSample0End;
const uint64_t kSample1End = kSample1Start + kSampleDuration;
const uint64_t kSample2Start = kSample1End;
const uint64_t kSample2End = kSample2Start + kSampleDuration;
const int64_t kSample0Start = 0;
const int64_t kSample0End = kSample0Start + kSampleDuration;
const int64_t kSample1Start = kSample0End;
const int64_t kSample1End = kSample1Start + kSampleDuration;
const int64_t kSample2Start = kSample1End;
const int64_t kSample2End = kSample2Start + kSampleDuration;
const double kSample1StartInSeconds =
static_cast<double>(kSample1Start) / kMsTimeScale;
@@ -485,15 +485,11 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
const int64_t kSampleDuration = 1000;
const int64_t kSample0Start = 0;
const int64_t kSample1Start = kSample0Start + kSampleDuration;
const int64_t kSample2Start = kSample1Start + kSampleDuration;
const uint64_t kSample0StartU = 0;
const uint64_t kSample0EndU = kSample0StartU + kSampleDuration;
const uint64_t kSample1StartU = kSample0EndU;
const uint64_t kSample1EndU = kSample1StartU + kSampleDuration;
const uint64_t kSample2StartU = kSample1EndU;
const uint64_t kSample2EndU = kSample2StartU + kSampleDuration;
const int64_t kSample0End = kSample0Start + kSampleDuration;
const int64_t kSample1Start = kSample0End;
const int64_t kSample1End = kSample1Start + kSampleDuration;
const int64_t kSample2Start = kSample1End;
const int64_t kSample2End = kSample2Start + kSampleDuration;
const double kSample2StartInSeconds =
static_cast<double>(kSample2Start) / kMsTimeScale;
@@ -516,15 +512,15 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsStreamInfo(kParent, kNoTimeScale, !kEncrypted)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(kNoId, kSample0StartU, kSample0EndU,
OnProcess(IsTextSample(kNoId, kSample0Start, kSample0End,
kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(kNoId, kSample1StartU, kSample1EndU,
OnProcess(IsTextSample(kNoId, kSample1Start, kSample1End,
kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsCueEvent(kParent, kSample2StartInSeconds)));
EXPECT_CALL(*Output(kTextStream),
OnProcess(IsTextSample(kNoId, kSample2StartU, kSample2EndU,
OnProcess(IsTextSample(kNoId, kSample2Start, kSample2End,
kNoSettings, kNoPayload)));
EXPECT_CALL(*Output(kTextStream), OnFlush(kParent));
}
@@ -572,15 +568,15 @@ TEST_F(CueAlignmentHandlerTest, TextAudioVideoInputWithCues) {
Input(kTextStream)
->Dispatch(StreamData::FromTextSample(
kChild,
GetTextSample(kNoId, kSample0StartU, kSample0EndU, kNoPayload)));
GetTextSample(kNoId, kSample0Start, kSample0End, kNoPayload)));
Input(kTextStream)
->Dispatch(StreamData::FromTextSample(
kChild,
GetTextSample(kNoId, kSample1StartU, kSample1EndU, kNoPayload)));
GetTextSample(kNoId, kSample1Start, kSample1End, kNoPayload)));
Input(kTextStream)
->Dispatch(StreamData::FromTextSample(
kChild,
GetTextSample(kNoId, kSample2StartU, kSample2EndU, kNoPayload)));
GetTextSample(kNoId, kSample2Start, kSample2End, kNoPayload)));
Input(kTextStream)->FlushAllDownstreams();
Input(kAudioStream)

@@ -15,8 +15,7 @@ namespace shaka {
namespace media {
namespace {
const int64_t kStartTimeSigned = 0;
const uint64_t kStartTime = 0;
const int64_t kStartTime = 0;
const int64_t kSegmentDuration = 10000; // 10 seconds
const size_t kStreamIndex = 0;
@@ -50,7 +49,7 @@ class TextChunkerTest : public MediaHandlerTestBase {
// |[---A---]|
// | |
TEST_F(TextChunkerTest, CueEndingOnSegmentStart) {
const uint64_t kSampleDuration = kSegmentDuration;
const int64_t kSampleDuration = kSegmentDuration;
{
testing::InSequence s;
@@ -64,8 +63,8 @@ TEST_F(TextChunkerTest, CueEndingOnSegmentStart) {
kNoSettings, kPayload[0])));
EXPECT_CALL(
*Output(kOutputIndex),
OnProcess(IsSegmentInfo(kStreamIndex, kStartTimeSigned,
kSegmentDuration, !kSubSegment, !kEncrypted)));
OnProcess(IsSegmentInfo(kStreamIndex, kStartTime, kSegmentDuration,
!kSubSegment, !kEncrypted)));
EXPECT_CALL(*Output(kOutputIndex), OnFlush(kStreamIndex));
}
@@ -89,7 +88,7 @@ TEST_F(TextChunkerTest, CueEndingOnSegmentStart) {
TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
// Divide segment duration by 2 so that the sample duration won't be a full
// segment.
const uint64_t kSampleDuration = kSegmentDuration / 2;
const int64_t kSampleDuration = kSegmentDuration / 2;
{
testing::InSequence s;
@@ -103,8 +102,8 @@ TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
kNoSettings, kPayload[0])));
EXPECT_CALL(
*Output(kOutputIndex),
OnProcess(IsSegmentInfo(kStreamIndex, kStartTimeSigned,
kSegmentDuration, !kSubSegment, !kEncrypted)));
OnProcess(IsSegmentInfo(kStreamIndex, kStartTime, kSegmentDuration,
!kSubSegment, !kEncrypted)));
// Segment Two
EXPECT_CALL(
@@ -112,10 +111,10 @@ TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
OnProcess(IsTextSample(kId[1], kStartTime + kSegmentDuration,
kStartTime + kSegmentDuration + kSampleDuration,
kNoSettings, kPayload[1])));
EXPECT_CALL(*Output(kOutputIndex),
OnProcess(IsSegmentInfo(
kStreamIndex, kStartTimeSigned + kSegmentDuration,
kSegmentDuration, !kSubSegment, !kEncrypted)));
EXPECT_CALL(
*Output(kOutputIndex),
OnProcess(IsSegmentInfo(kStreamIndex, kStartTime + kSegmentDuration,
kSegmentDuration, !kSubSegment, !kEncrypted)));
EXPECT_CALL(*Output(kOutputIndex), OnFlush(kStreamIndex));
}
@@ -143,7 +142,7 @@ TEST_F(TextChunkerTest, CreatesSegmentsForCues) {
// | | [---B---]
// | |
TEST_F(TextChunkerTest, OutputsEmptySegments) {
const uint64_t kSampleDuration = kSegmentDuration / 2;
const int64_t kSampleDuration = kSegmentDuration / 2;
const int64_t kSegment1Start = kStartTime;
const int64_t kSegment2Start = kSegment1Start + kSegmentDuration;
@@ -208,7 +207,7 @@ TEST_F(TextChunkerTest, OutputsEmptySegments) {
// [-----A-----|---------]
// |
TEST_F(TextChunkerTest, CueCrossesSegments) {
const uint64_t kSampleDuration = 2 * kSegmentDuration;
const int64_t kSampleDuration = 2 * kSegmentDuration;
{
testing::InSequence s;
@@ -222,18 +221,18 @@ TEST_F(TextChunkerTest, CueCrossesSegments) {
kNoSettings, kPayload[0])));
EXPECT_CALL(
*Output(kOutputIndex),
OnProcess(IsSegmentInfo(kStreamIndex, kStartTimeSigned,
kSegmentDuration, !kSubSegment, !kEncrypted)));
OnProcess(IsSegmentInfo(kStreamIndex, kStartTime, kSegmentDuration,
!kSubSegment, !kEncrypted)));
// Segment Two
EXPECT_CALL(
*Output(kOutputIndex),
OnProcess(IsTextSample(kId[0], kStartTime, kStartTime + kSampleDuration,
kNoSettings, kPayload[0])));
EXPECT_CALL(*Output(kOutputIndex),
OnProcess(IsSegmentInfo(
kStreamIndex, kStartTimeSigned + kSegmentDuration,
kSegmentDuration, !kSubSegment, !kEncrypted)));
EXPECT_CALL(
*Output(kOutputIndex),
OnProcess(IsSegmentInfo(kStreamIndex, kStartTime + kSegmentDuration,
kSegmentDuration, !kSubSegment, !kEncrypted)));
EXPECT_CALL(*Output(kOutputIndex), OnFlush(kStreamIndex));
}
@@ -258,7 +257,7 @@ TEST_F(TextChunkerOrderTest, PreservesOrder) {
const size_t kInput = 0;
const size_t kOutput = 0;
const uint64_t kDuration = 10000;
const int64_t kDuration = 10000;
const int64_t kSegmentStart1 = 0;
const int64_t kSegmentStart2 = kDuration;

@@ -17,16 +17,16 @@ namespace media {
class DisplayAction {
public:
DisplayAction(uint64_t id, uint64_t time) : id_(id), time_(time) {}
DisplayAction(uint64_t id, int64_t time) : id_(id), time_(time) {}
virtual ~DisplayAction() = default;
uint64_t id() const { return id_; }
uint64_t time() const { return time_; }
int64_t time() const { return time_; }
virtual void ActOn(std::list<const TextSample*>* display) const = 0;
private:
uint64_t id_;
uint64_t time_;
int64_t time_;
};
namespace {
@@ -92,7 +92,7 @@ Status WebVttToMp4Handler::Process(std::unique_ptr<StreamData> stream_data) {
}
Status WebVttToMp4Handler::OnFlushRequest(size_t input_stream_index) {
const uint64_t kEndOfTime = std::numeric_limits<uint64_t>::max();
const int64_t kEndOfTime = std::numeric_limits<int64_t>::max();
ProcessUpToTime(kEndOfTime);
return FlushDownstream(0);
@@ -121,13 +121,13 @@ void WebVttToMp4Handler::WriteCue(const std::string& id,
box.Write(out);
}
Status WebVttToMp4Handler::ProcessUpToTime(uint64_t cutoff_time) {
Status WebVttToMp4Handler::ProcessUpToTime(int64_t cutoff_time) {
// We can only process as far as the last add as no new events will be
// added that come before that time.
while (actions_.size() && actions_.top()->time() < cutoff_time) {
// STAGE 1: Write out the current state
// Get the time range for which the current active state is valid.
const uint64_t previous_change = next_change_;
const int64_t previous_change = next_change_;
next_change_ = actions_.top()->time();
if (next_change_ > previous_change) {
@@ -161,8 +161,8 @@ Status WebVttToMp4Handler::ProcessUpToTime(uint64_t cutoff_time) {
Status WebVttToMp4Handler::MergeAndSendSamples(
const std::list<const TextSample*>& samples,
uint64_t start_time,
uint64_t end_time) {
int64_t start_time,
int64_t end_time) {
DCHECK_GT(end_time, start_time);
box_writer_.Clear();
@@ -181,8 +181,8 @@ return DispatchMediaSample(kTrackId, std::move(sample));
return DispatchMediaSample(kTrackId, std::move(sample));
}
Status WebVttToMp4Handler::SendEmptySample(uint64_t start_time,
uint64_t end_time) {
Status WebVttToMp4Handler::SendEmptySample(int64_t start_time,
int64_t end_time) {
DCHECK_GT(end_time, start_time);
box_writer_.Clear();

@@ -54,20 +54,20 @@ class WebVttToMp4Handler : public MediaHandler {
// queue's time is less than |cutoff|. |cutoff| is needed as we can only
// merge and send samples when we are sure no new samples will appear before
// the next action.
Status ProcessUpToTime(uint64_t cutoff_time);
Status ProcessUpToTime(int64_t cutoff_time);
// Merge together all TextSamples in |samples| into a single MP4 box and
// pass the box downstream.
Status MergeAndSendSamples(const std::list<const TextSample*>& samples,
uint64_t start_time,
uint64_t end_time);
int64_t start_time,
int64_t end_time);
Status SendEmptySample(uint64_t start_time, uint64_t end_time);
Status SendEmptySample(int64_t start_time, int64_t end_time);
// Get a new id for the next action.
uint64_t NextActionId();
uint64_t next_change_ = 0;
int64_t next_change_ = 0;
// This is the current state of the box we are writing.
BufferWriter box_writer_;