[Cleanup] Use int64_t for time for consistency

Change-Id: I408f3fe7c98320be4e43ca2a5bcb9b22cc8a2012
KongQun Yang 2018-06-21 18:14:34 -07:00
parent d1caa29c8d
commit b4256bf040
21 changed files with 176 additions and 185 deletions
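Not part of the commit itself, but a minimal illustration of why signed 64-bit time is the safer choice: with unsigned time values, a delta where the second timestamp is slightly earlier wraps around to a huge positive number instead of going negative, while int64_t lets callers subtract timestamps freely and check the sign.

#include <cstdint>
#include <iostream>

int main() {
  // With unsigned time, a slightly earlier "next" timestamp wraps around.
  uint64_t prev_u = 1000;
  uint64_t next_u = 990;
  std::cout << next_u - prev_u << "\n";  // 18446744073709551606

  // With signed 64-bit time, the delta is simply -10 and easy to detect.
  int64_t prev_s = 1000;
  int64_t next_s = 990;
  std::cout << next_s - prev_s << "\n";  // -10
  return 0;
}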

View File

@ -374,8 +374,8 @@ bool MediaPlaylist::SetMediaInfo(const MediaInfo& media_info) {
}
void MediaPlaylist::AddSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t start_byte_offset,
uint64_t size) {
if (stream_type_ == MediaPlaylistStreamType::kVideoIFramesOnly) {
@ -387,9 +387,9 @@ void MediaPlaylist::AddSegment(const std::string& file_name,
for (auto iter = key_frames_.begin(); iter != key_frames_.end(); ++iter) {
// Last entry duration may be adjusted later when the next iframe becomes
// available.
const uint64_t next_timestamp = std::next(iter) == key_frames_.end()
? (start_time + duration)
: std::next(iter)->timestamp;
const int64_t next_timestamp = std::next(iter) == key_frames_.end()
? (start_time + duration)
: std::next(iter)->timestamp;
AddSegmentInfoEntry(file_name, iter->timestamp,
next_timestamp - iter->timestamp,
iter->start_byte_offset, iter->size);
@ -401,7 +401,7 @@ void MediaPlaylist::AddSegment(const std::string& file_name,
size);
}
void MediaPlaylist::AddKeyFrame(uint64_t timestamp,
void MediaPlaylist::AddKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size) {
if (stream_type_ != MediaPlaylistStreamType::kVideoIFramesOnly) {
@ -504,8 +504,8 @@ bool MediaPlaylist::GetDisplayResolution(uint32_t* width,
}
void MediaPlaylist::AddSegmentInfoEntry(const std::string& segment_file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t start_byte_offset,
uint64_t size) {
if (time_scale_ == 0) {
@ -533,8 +533,7 @@ void MediaPlaylist::AddSegmentInfoEntry(const std::string& segment_file_name,
SlideWindow();
}
void MediaPlaylist::AdjustLastSegmentInfoEntryDuration(
uint64_t next_timestamp) {
void MediaPlaylist::AdjustLastSegmentInfoEntryDuration(int64_t next_timestamp) {
if (time_scale_ == 0)
return;
@ -613,7 +612,7 @@ void MediaPlaylist::SlideWindow() {
std::make_move_iterator(ext_x_keys.end()));
}
void MediaPlaylist::RemoveOldSegment(uint64_t start_time) {
void MediaPlaylist::RemoveOldSegment(int64_t start_time) {
if (hls_params_.preserved_segments_outside_live_window == 0)
return;
if (stream_type_ == MediaPlaylistStreamType::kVideoIFramesOnly)

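The I-frames-only branch of AddSegment above derives one playlist entry per key frame, where each entry's duration runs to the next key frame (or to the segment end for the last one). A small stand-alone sketch of the same computation, hypothetical and not from the commit:

#include <cstdint>
#include <cstdio>
#include <vector>

struct KeyFrame { int64_t timestamp; };

int main() {
  const int64_t segment_start = 0;
  const int64_t segment_duration = 90000;  // one second at a 90 kHz timescale
  const std::vector<KeyFrame> key_frames = {{0}, {30000}, {60000}};
  for (size_t i = 0; i < key_frames.size(); ++i) {
    // The last entry runs to the end of the segment; it may be adjusted later
    // when the next I-frame becomes available.
    const int64_t next_timestamp = (i + 1 == key_frames.size())
                                       ? segment_start + segment_duration
                                       : key_frames[i + 1].timestamp;
    std::printf("entry start=%lld duration=%lld\n",
                static_cast<long long>(key_frames[i].timestamp),
                static_cast<long long>(next_timestamp - key_frames[i].timestamp));
  }
  return 0;
}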
View File

@ -101,8 +101,8 @@ class MediaPlaylist {
/// This must be 0 if the whole segment is a subsegment.
/// @param size is size in bytes.
virtual void AddSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t start_byte_offset,
uint64_t size);
@ -112,7 +112,7 @@ class MediaPlaylist {
/// media.
/// @param start_byte_offset is the offset of where the key frame starts.
/// @param size is size in bytes.
virtual void AddKeyFrame(uint64_t timestamp,
virtual void AddKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size);
@ -182,20 +182,20 @@ class MediaPlaylist {
private:
// Add a SegmentInfoEntry (#EXTINF).
void AddSegmentInfoEntry(const std::string& segment_file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t start_byte_offset,
uint64_t size);
// Adjust the duration of the last SegmentInfoEntry to end on
// |next_timestamp|.
void AdjustLastSegmentInfoEntryDuration(uint64_t next_timestamp);
void AdjustLastSegmentInfoEntryDuration(int64_t next_timestamp);
// Remove elements from |entries_| for live profile. Increments
// |sequence_number_| by the number of segments removed.
void SlideWindow();
// Remove the segment specified by |start_time|. The actual deletion can
// happen at a later time depending on the value of
// |preserved_segment_outside_live_window| in |hls_params_|.
void RemoveOldSegment(uint64_t start_time);
void RemoveOldSegment(int64_t start_time);
const HlsParams& hls_params_;
// Mainly for MasterPlaylist to use these values.
@ -232,7 +232,7 @@ class MediaPlaylist {
// Used by kVideoIFrameOnly playlists to track the i-frames (key frames).
struct KeyFrameInfo {
uint64_t timestamp;
int64_t timestamp;
uint64_t start_byte_offset;
uint64_t size;
std::string segment_file_name;
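The int64_t time arguments documented above carry values in the stream's timescale rather than seconds. A hypothetical helper pair, not part of this change, showing the conversion (for example, a 6-second segment at a 90 kHz timescale is 540000 ticks):

#include <cstdint>

// Hypothetical helpers; the playlist code itself passes raw tick values around
// and divides by the timescale only when it writes #EXTINF durations.
constexpr int64_t SecondsToTicks(double seconds, uint32_t timescale) {
  return static_cast<int64_t>(seconds * timescale);
}
constexpr double TicksToSeconds(int64_t ticks, uint32_t timescale) {
  return static_cast<double>(ticks) / timescale;
}

static_assert(SecondsToTicks(6.0, 90000) == 540000, "6 s at 90 kHz is 540000 ticks");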

View File

@ -26,12 +26,12 @@ class MockMediaPlaylist : public MediaPlaylist {
MOCK_METHOD1(SetMediaInfo, bool(const MediaInfo& media_info));
MOCK_METHOD5(AddSegment,
void(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t start_byte_offset,
uint64_t size));
MOCK_METHOD3(AddKeyFrame,
void(uint64_t timestamp,
void(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size));
MOCK_METHOD6(AddEncryptionInfo,

View File

@ -57,15 +57,15 @@ void CombinedMuxerListener::OnMediaEnd(const MediaRanges& media_ranges,
}
void CombinedMuxerListener::OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) {
for (auto& listener : muxer_listeners_) {
listener->OnNewSegment(file_name, start_time, duration, segment_file_size);
}
}
void CombinedMuxerListener::OnKeyFrame(uint64_t timestamp,
void CombinedMuxerListener::OnKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size) {
for (auto& listener : muxer_listeners_) {
@ -73,7 +73,7 @@ void CombinedMuxerListener::OnKeyFrame(uint64_t timestamp,
}
}
void CombinedMuxerListener::OnCueEvent(uint64_t timestamp,
void CombinedMuxerListener::OnCueEvent(int64_t timestamp,
const std::string& cue_data) {
for (auto& listener : muxer_listeners_) {
listener->OnCueEvent(timestamp, cue_data);

View File

@ -36,13 +36,11 @@ class CombinedMuxerListener : public MuxerListener {
void OnMediaEnd(const MediaRanges& media_ranges,
float duration_seconds) override;
void OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) override;
void OnKeyFrame(uint64_t timestamp,
uint64_t start_byte_offset,
uint64_t size);
void OnCueEvent(uint64_t timestamp, const std::string& cue_data) override;
void OnKeyFrame(int64_t timestamp, uint64_t start_byte_offset, uint64_t size);
void OnCueEvent(int64_t timestamp, const std::string& cue_data) override;
private:
std::list<std::unique_ptr<MuxerListener>> muxer_listeners_;

View File

@ -16,14 +16,14 @@ namespace media {
// This stores data passed into OnNewSegment() for VOD.
struct SegmentEventInfo {
uint64_t start_time;
int64_t start_time;
// The below two fields are only useful for Segment.
uint64_t duration;
int64_t duration;
uint64_t segment_file_size;
};
struct KeyFrameEvent {
uint64_t timestamp;
int64_t timestamp;
// In segment for multi-segment, in subsegment for single-segment.
uint64_t start_offset_in_segment;
uint64_t size;
@ -31,7 +31,7 @@ struct KeyFrameEvent {
// This stores data passed into OnCueEvent() for VOD.
struct CueEventInfo {
uint64_t timestamp;
int64_t timestamp;
};
enum class EventInfoType {

View File

@ -202,8 +202,8 @@ void HlsNotifyMuxerListener::OnMediaEnd(const MediaRanges& media_ranges,
}
void HlsNotifyMuxerListener::OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) {
if (!media_info_->has_segment_template()) {
EventInfo event_info;
@ -220,7 +220,7 @@ void HlsNotifyMuxerListener::OnNewSegment(const std::string& file_name,
}
}
void HlsNotifyMuxerListener::OnKeyFrame(uint64_t timestamp,
void HlsNotifyMuxerListener::OnKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size) {
if (!iframes_only_)
@ -237,7 +237,7 @@ void HlsNotifyMuxerListener::OnKeyFrame(uint64_t timestamp,
}
}
void HlsNotifyMuxerListener::OnCueEvent(uint64_t timestamp,
void HlsNotifyMuxerListener::OnCueEvent(int64_t timestamp,
const std::string& cue_data) {
// Not using |cue_data| at this moment.
if (!media_info_->has_segment_template()) {

View File

@ -60,13 +60,11 @@ class HlsNotifyMuxerListener : public MuxerListener {
void OnMediaEnd(const MediaRanges& media_ranges,
float duration_seconds) override;
void OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) override;
void OnKeyFrame(uint64_t timestamp,
uint64_t start_byte_offset,
uint64_t size);
void OnCueEvent(uint64_t timestamp, const std::string& cue_data) override;
void OnKeyFrame(int64_t timestamp, uint64_t start_byte_offset, uint64_t size);
void OnCueEvent(int64_t timestamp, const std::string& cue_data) override;
/// @}
private:

View File

@ -58,17 +58,17 @@ class MockMuxerListener : public MuxerListener {
MOCK_METHOD4(OnNewSegment,
void(const std::string& segment_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size));
MOCK_METHOD3(OnKeyFrame,
void(uint64_t timestamp,
void(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size));
MOCK_METHOD2(OnCueEvent,
void(uint64_t timestamp, const std::string& cue_data));
void(int64_t timestamp, const std::string& cue_data));
};
} // namespace media

View File

@ -171,8 +171,8 @@ void MpdNotifyMuxerListener::OnMediaEnd(const MediaRanges& media_ranges,
}
void MpdNotifyMuxerListener::OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) {
if (mpd_notifier_->dash_profile() == DashProfile::kLive) {
mpd_notifier_->NotifyNewSegment(notification_id_.value(), start_time,
@ -187,13 +187,13 @@ void MpdNotifyMuxerListener::OnNewSegment(const std::string& file_name,
}
}
void MpdNotifyMuxerListener::OnKeyFrame(uint64_t timestamp,
void MpdNotifyMuxerListener::OnKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size) {
// NO-OP for DASH.
}
void MpdNotifyMuxerListener::OnCueEvent(uint64_t timestamp,
void MpdNotifyMuxerListener::OnCueEvent(int64_t timestamp,
const std::string& cue_data) {
// Not using |cue_data| at this moment.
if (mpd_notifier_->dash_profile() == DashProfile::kLive) {

View File

@ -48,13 +48,11 @@ class MpdNotifyMuxerListener : public MuxerListener {
void OnMediaEnd(const MediaRanges& media_ranges,
float duration_seconds) override;
void OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) override;
void OnKeyFrame(uint64_t timestamp,
uint64_t start_byte_offset,
uint64_t size);
void OnCueEvent(uint64_t timestamp, const std::string& cue_data) override;
void OnKeyFrame(int64_t timestamp, uint64_t start_byte_offset, uint64_t size);
void OnCueEvent(int64_t timestamp, const std::string& cue_data) override;
/// @}
private:

View File

@ -125,8 +125,8 @@ class MuxerListener {
/// specified by MediaInfo passed to OnMediaStart().
/// @param segment_file_size is the segment size in bytes.
virtual void OnNewSegment(const std::string& segment_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) = 0;
/// Called when there is a new key frame. For Video only. Note that it should
@ -134,14 +134,14 @@ class MuxerListener {
/// @param timestamp is in terms of the timescale of the media.
/// @param start_byte_offset is the offset of where the key frame starts.
/// @param size is size in bytes.
virtual void OnKeyFrame(uint64_t timestamp,
virtual void OnKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size) = 0;
/// Called when there is a new Ad Cue, which should align with (sub)segments.
/// @param timestamp indicate the cue timestamp.
/// @param cue_data is the data of the cue.
virtual void OnCueEvent(uint64_t timestamp, const std::string& cue_data) = 0;
virtual void OnCueEvent(int64_t timestamp, const std::string& cue_data) = 0;
protected:
MuxerListener() = default;

View File

@ -88,8 +88,8 @@ void VodMediaInfoDumpMuxerListener::OnMediaEnd(const MediaRanges& media_ranges,
}
void VodMediaInfoDumpMuxerListener::OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) {
const double segment_duration_seconds =
static_cast<double>(duration) / media_info_->reference_time_scale();
@ -100,11 +100,11 @@ void VodMediaInfoDumpMuxerListener::OnNewSegment(const std::string& file_name,
max_bitrate_ = std::max(max_bitrate_, bitrate);
}
void VodMediaInfoDumpMuxerListener::OnKeyFrame(uint64_t timestamp,
void VodMediaInfoDumpMuxerListener::OnKeyFrame(int64_t timestamp,
uint64_t start_byte_offset,
uint64_t size) {}
void VodMediaInfoDumpMuxerListener::OnCueEvent(uint64_t timestamp,
void VodMediaInfoDumpMuxerListener::OnCueEvent(int64_t timestamp,
const std::string& cue_data) {
NOTIMPLEMENTED();
}
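A hedged sketch (not from this commit; the type and field names below are made up) of the bookkeeping OnNewSegment performs above: the signed duration in timescale units is converted to seconds, and the segment's size is turned into a bitrate used to track the maximum.

#include <algorithm>
#include <cstdint>

// Hypothetical stand-alone version of the bookkeeping shown above; the real
// listener reads the timescale from its MediaInfo.
struct SegmentBitrateTracker {
  uint32_t timescale = 90000;
  uint64_t max_bitrate = 0;  // bits per second

  void OnNewSegment(int64_t duration, uint64_t segment_file_size) {
    const double segment_duration_seconds =
        static_cast<double>(duration) / timescale;
    const uint64_t bitrate = static_cast<uint64_t>(
        segment_file_size * 8 / segment_duration_seconds);
    max_bitrate = std::max(max_bitrate, bitrate);
  }
};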

View File

@ -47,13 +47,11 @@ class VodMediaInfoDumpMuxerListener : public MuxerListener {
void OnMediaEnd(const MediaRanges& media_ranges,
float duration_seconds) override;
void OnNewSegment(const std::string& file_name,
uint64_t start_time,
uint64_t duration,
int64_t start_time,
int64_t duration,
uint64_t segment_file_size) override;
void OnKeyFrame(uint64_t timestamp,
uint64_t start_byte_offset,
uint64_t size);
void OnCueEvent(uint64_t timestamp, const std::string& cue_data) override;
void OnKeyFrame(int64_t timestamp, uint64_t start_byte_offset, uint64_t size);
void OnCueEvent(int64_t timestamp, const std::string& cue_data) override;
/// @}
/// Write @a media_info to @a output_file_path in human readable format.

View File

@ -145,8 +145,8 @@ class RepresentationStateChangeListenerImpl
~RepresentationStateChangeListenerImpl() override {}
// RepresentationStateChangeListener implementation.
void OnNewSegmentForRepresentation(uint64_t start_time,
uint64_t duration) override {
void OnNewSegmentForRepresentation(int64_t start_time,
int64_t duration) override {
adaptation_set_->OnNewSegmentForRepresentation(representation_id_,
start_time, duration);
}

View File

@ -76,7 +76,7 @@ class MockRepresentation : public Representation {
MOCK_METHOD2(UpdateContentProtectionPssh,
void(const std::string& drm_uuid, const std::string& pssh));
MOCK_METHOD3(AddNewSegment,
void(uint64_t start_time, uint64_t duration, uint64_t size));
void(int64_t start_time, int64_t duration, uint64_t size));
MOCK_METHOD1(SetSampleDuration, void(uint32_t sample_duration));
MOCK_CONST_METHOD0(GetMediaInfo, const MediaInfo&());
};

View File

@ -79,17 +79,17 @@ uint32_t GetTimeScale(const MediaInfo& media_info) {
return 1;
}
uint64_t LastSegmentStartTime(const SegmentInfo& segment_info) {
int64_t LastSegmentStartTime(const SegmentInfo& segment_info) {
return segment_info.start_time + segment_info.duration * segment_info.repeat;
}
// This is equal to |segment_info| end time
uint64_t LastSegmentEndTime(const SegmentInfo& segment_info) {
int64_t LastSegmentEndTime(const SegmentInfo& segment_info) {
return segment_info.start_time +
segment_info.duration * (segment_info.repeat + 1);
}
uint64_t LatestSegmentStartTime(const std::list<SegmentInfo>& segments) {
int64_t LatestSegmentStartTime(const std::list<SegmentInfo>& segments) {
DCHECK(!segments.empty());
const SegmentInfo& latest_segment = segments.back();
return LastSegmentStartTime(latest_segment);
@ -97,7 +97,7 @@ uint64_t LatestSegmentStartTime(const std::list<SegmentInfo>& segments) {
// Given |timeshift_limit|, finds out the number of segments that are no longer
// valid and should be removed from |segment_info|.
uint64_t SearchTimedOutRepeatIndex(uint64_t timeshift_limit,
uint64_t SearchTimedOutRepeatIndex(int64_t timeshift_limit,
const SegmentInfo& segment_info) {
DCHECK_LE(timeshift_limit, LastSegmentEndTime(segment_info));
if (timeshift_limit < segment_info.start_time)
@ -195,8 +195,8 @@ void Representation::UpdateContentProtectionPssh(const std::string& drm_uuid,
&content_protection_elements_);
}
void Representation::AddNewSegment(uint64_t start_time,
uint64_t duration,
void Representation::AddNewSegment(int64_t start_time,
int64_t duration,
uint64_t size) {
if (start_time == 0 && duration == 0) {
LOG(WARNING) << "Got segment with start_time and duration == 0. Ignoring.";
@ -309,7 +309,7 @@ void Representation::SuppressOnce(SuppressFlag flag) {
void Representation::SetPresentationTimeOffset(
double presentation_time_offset) {
uint64_t pto = presentation_time_offset * media_info_.reference_time_scale();
int64_t pto = presentation_time_offset * media_info_.reference_time_scale();
if (pto <= 0)
return;
media_info_.set_presentation_time_offset(pto);
@ -350,21 +350,21 @@ bool Representation::HasRequiredMediaInfoFields() const {
return true;
}
void Representation::AddSegmentInfo(uint64_t start_time, uint64_t duration) {
void Representation::AddSegmentInfo(int64_t start_time, int64_t duration) {
const uint64_t kNoRepeat = 0;
const uint64_t adjusted_duration = AdjustDuration(duration);
const int64_t adjusted_duration = AdjustDuration(duration);
if (!segment_infos_.empty()) {
// Contiguous segment.
const SegmentInfo& previous = segment_infos_.back();
const uint64_t previous_segment_end_time =
const int64_t previous_segment_end_time =
previous.start_time + previous.duration * (previous.repeat + 1);
// Make it continuous if the segment start time is close to previous segment
// end time.
if (ApproximiatelyEqual(previous_segment_end_time, start_time)) {
const uint64_t segment_end_time_for_same_duration =
const int64_t segment_end_time_for_same_duration =
previous_segment_end_time + previous.duration;
const uint64_t actual_segment_end_time = start_time + duration;
const int64_t actual_segment_end_time = start_time + duration;
// Consider the segments having identical duration if the segment end time
// is close to calculated segment end time by assuming identical duration.
if (ApproximiatelyEqual(segment_end_time_for_same_duration,
@ -379,7 +379,7 @@ void Representation::AddSegmentInfo(uint64_t start_time, uint64_t duration) {
}
// A gap since previous.
const uint64_t kRoundingErrorGrace = 5;
const int64_t kRoundingErrorGrace = 5;
if (previous_segment_end_time + kRoundingErrorGrace < start_time) {
LOG(WARNING) << "Found a gap of size "
<< (start_time - previous_segment_end_time)
@ -401,7 +401,7 @@ void Representation::AddSegmentInfo(uint64_t start_time, uint64_t duration) {
segment_infos_.push_back({start_time, adjusted_duration, kNoRepeat});
}
bool Representation::ApproximiatelyEqual(uint64_t time1, uint64_t time2) const {
bool Representation::ApproximiatelyEqual(int64_t time1, int64_t time2) const {
if (!allow_approximate_segment_timeline_)
return time1 == time2;
@ -423,10 +423,10 @@ bool Representation::ApproximiatelyEqual(uint64_t time1, uint64_t time2) const {
return time1 <= time2 + error_threshold && time2 <= time1 + error_threshold;
}
uint64_t Representation::AdjustDuration(uint64_t duration) const {
int64_t Representation::AdjustDuration(int64_t duration) const {
if (!allow_approximate_segment_timeline_)
return duration;
const uint64_t scaled_target_duration =
const int64_t scaled_target_duration =
mpd_options_.target_segment_duration * media_info_.reference_time_scale();
return ApproximiatelyEqual(scaled_target_duration, duration)
? scaled_target_duration
@ -442,23 +442,23 @@ void Representation::SlideWindow() {
const uint32_t time_scale = GetTimeScale(media_info_);
DCHECK_GT(time_scale, 0u);
uint64_t time_shift_buffer_depth = static_cast<uint64_t>(
int64_t time_shift_buffer_depth = static_cast<int64_t>(
mpd_options_.mpd_params.time_shift_buffer_depth * time_scale);
// The start time of the latest segment is considered the current_play_time,
// and this should guarantee that the latest segment will stay in the list.
const uint64_t current_play_time = LatestSegmentStartTime(segment_infos_);
const int64_t current_play_time = LatestSegmentStartTime(segment_infos_);
if (current_play_time <= time_shift_buffer_depth)
return;
const uint64_t timeshift_limit = current_play_time - time_shift_buffer_depth;
const int64_t timeshift_limit = current_play_time - time_shift_buffer_depth;
// First remove all the SegmentInfos that are completely out of range, by
// looking at the very last segment's end time.
std::list<SegmentInfo>::iterator first = segment_infos_.begin();
std::list<SegmentInfo>::iterator last = first;
for (; last != segment_infos_.end(); ++last) {
const uint64_t last_segment_end_time = LastSegmentEndTime(*last);
const int64_t last_segment_end_time = LastSegmentEndTime(*last);
if (timeshift_limit < last_segment_end_time)
break;
RemoveSegments(last->start_time, last->duration, last->repeat + 1);
@ -485,8 +485,8 @@ void Representation::SlideWindow() {
start_number_ += repeat_index;
}
void Representation::RemoveSegments(uint64_t start_time,
uint64_t duration,
void Representation::RemoveSegments(int64_t start_time,
int64_t duration,
uint64_t num_segments) {
if (mpd_options_.mpd_params.preserved_segments_outside_live_window == 0)
return;
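A worked example, hypothetical and not from the commit, of the sliding-window arithmetic in Representation::SlideWindow above, now entirely in signed 64-bit time: the timeshift limit is the latest segment's start time minus the scaled time-shift buffer depth, and any SegmentInfo whose end time falls at or before that limit slides out of the window.

#include <cassert>
#include <cstdint>

int main() {
  // Assume a 1000-tick timescale and a 30-second time-shift buffer depth.
  const uint32_t timescale = 1000;
  const int64_t time_shift_buffer_depth = static_cast<int64_t>(30 * timescale);

  // The start time of the latest segment acts as the current play time.
  const int64_t current_play_time = 100000;  // 100 s into the stream
  assert(current_play_time > time_shift_buffer_depth);

  // Everything ending at or before this limit is outside the live window.
  const int64_t timeshift_limit = current_play_time - time_shift_buffer_depth;
  assert(timeshift_limit == 70000);

  // A SegmentInfo {start_time = 0, duration = 1000, repeat = 49} ends at
  // 0 + 1000 * (49 + 1) = 50000, which is <= 70000, so it is removed.
  const int64_t last_segment_end_time = 0 + 1000 * (49 + 1);
  assert(last_segment_end_time <= timeshift_limit);
  return 0;
}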

View File

@ -38,8 +38,8 @@ class RepresentationStateChangeListener {
/// the Representation.
/// @param start_time is the start time of the new segment.
/// @param duration is the duration of the new segment.
virtual void OnNewSegmentForRepresentation(uint64_t start_time,
uint64_t duration) = 0;
virtual void OnNewSegmentForRepresentation(int64_t start_time,
int64_t duration) = 0;
/// Notifies the instance that the frame rate was set for the
/// Representation.
@ -101,8 +101,8 @@ class Representation {
/// @param duration is the duration of the segment, in units of the stream's
/// time scale.
/// @param size of the segment in bytes.
virtual void AddNewSegment(uint64_t start_time,
uint64_t duration,
virtual void AddNewSegment(int64_t start_time,
int64_t duration,
uint64_t size);
/// Set the sample duration of this Representation.
@ -182,24 +182,24 @@ class Representation {
// Add a SegmentInfo. This function may insert an adjusted SegmentInfo if
// |allow_approximate_segment_timeline_| is set.
void AddSegmentInfo(uint64_t start_time, uint64_t duration);
void AddSegmentInfo(int64_t start_time, int64_t duration);
// Check if two timestamps are approximately equal if
// |allow_approximate_segment_timeline_| is set; Otherwise check whether the
// two times match.
bool ApproximiatelyEqual(uint64_t time1, uint64_t time2) const;
bool ApproximiatelyEqual(int64_t time1, int64_t time2) const;
// Return adjusted duration if |allow_aproximate_segment_timeline_or_duration|
// is set; otherwise duration is returned without adjustment.
uint64_t AdjustDuration(uint64_t duration) const;
int64_t AdjustDuration(int64_t duration) const;
// Remove elements from |segment_infos_| for dynamic live profile. Increments
// |start_number_| by the number of segments removed.
void SlideWindow();
// Remove |num_segments| starting from |start_time| with |duration|.
void RemoveSegments(uint64_t start_time,
uint64_t duration,
void RemoveSegments(int64_t start_time,
int64_t duration,
uint64_t num_segments);
// Note: Because 'mimeType' is a required field for a valid MPD, these return

View File

@ -37,7 +37,7 @@ class MockRepresentationStateChangeListener
~MockRepresentationStateChangeListener() {}
MOCK_METHOD2(OnNewSegmentForRepresentation,
void(uint64_t start_time, uint64_t duration));
void(int64_t start_time, int64_t duration));
MOCK_METHOD2(OnSetFrameRateForRepresentation,
void(uint32_t frame_duration, uint32_t timescale));
@ -270,8 +270,8 @@ TEST_F(RepresentationTest,
"}\n"
"container_type: 1\n";
const uint64_t kStartTime = 199238u;
const uint64_t kDuration = 98u;
const int64_t kStartTime = 199238u;
const int64_t kDuration = 98u;
std::unique_ptr<MockRepresentationStateChangeListener> listener(
new MockRepresentationStateChangeListener());
EXPECT_CALL(*listener, OnNewSegmentForRepresentation(kStartTime, kDuration));
@ -300,8 +300,8 @@ TEST_F(RepresentationTest,
"}\n"
"container_type: 1\n";
const uint64_t kTimeScale = 1000u;
const uint64_t kFrameDuration = 33u;
const uint32_t kTimeScale = 1000u;
const int64_t kFrameDuration = 33u;
std::unique_ptr<MockRepresentationStateChangeListener> listener(
new MockRepresentationStateChangeListener());
EXPECT_CALL(*listener,
@ -415,7 +415,7 @@ const char kSElementTemplateWithoutR[] =
"<S t=\"%" PRIu64 "\" d=\"%" PRIu64 "\"/>\n";
const int kDefaultStartNumber = 1;
const uint32_t kDefaultTimeScale = 1000u;
const uint64_t kScaledTargetSegmentDuration = 10;
const int64_t kScaledTargetSegmentDuration = 10;
const uint32_t kSampleDuration = 2;
std::string GetDefaultMediaInfo() {
@ -453,8 +453,8 @@ class SegmentTemplateTest : public RepresentationTest {
ASSERT_TRUE(representation_->Init());
}
void AddSegments(uint64_t start_time,
uint64_t duration,
void AddSegments(int64_t start_time,
int64_t duration,
uint64_t size,
uint64_t repeat) {
DCHECK(representation_);
@ -504,8 +504,8 @@ class SegmentTemplateTest : public RepresentationTest {
// Estimate the bandwidth given the info from AddNewSegment().
TEST_F(SegmentTemplateTest, OneSegmentNormal) {
const uint64_t kStartTime = 0;
const uint64_t kDuration = 10;
const int64_t kStartTime = 0;
const int64_t kDuration = 10;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
@ -520,8 +520,8 @@ TEST_F(SegmentTemplateTest, RepresentationClone) {
CreateRepresentation(media_info, kAnyRepresentationId, NoListener());
ASSERT_TRUE(representation_->Init());
const uint64_t kStartTime = 0;
const uint64_t kDuration = 10;
const int64_t kStartTime = 0;
const int64_t kDuration = 10;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
@ -540,8 +540,8 @@ TEST_F(SegmentTemplateTest, RepresentationClone) {
}
TEST_F(SegmentTemplateTest, PresentationTimeOffset) {
const uint64_t kStartTime = 0;
const uint64_t kDuration = 10;
const int64_t kStartTime = 0;
const int64_t kDuration = 10;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
@ -569,8 +569,8 @@ TEST_F(SegmentTemplateTest, GetStartAndEndTimestamps) {
EXPECT_FALSE(representation_->GetStartAndEndTimestamps(&start_timestamp,
&end_timestamp));
const uint64_t kStartTime = 88;
const uint64_t kDuration = 10;
const int64_t kStartTime = 88;
const int64_t kDuration = 10;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
AddSegments(kStartTime + kDuration, kDuration, kSize, 2);
@ -584,8 +584,8 @@ TEST_F(SegmentTemplateTest, GetStartAndEndTimestamps) {
TEST_F(SegmentTemplateTest, NormalRepeatedSegmentDuration) {
const uint64_t kSize = 256;
uint64_t start_time = 0;
uint64_t duration = 40000;
int64_t start_time = 0;
int64_t duration = 40000;
uint64_t repeat = 2;
AddSegments(start_time, duration, kSize, repeat);
@ -604,8 +604,8 @@ TEST_F(SegmentTemplateTest, NormalRepeatedSegmentDuration) {
TEST_F(SegmentTemplateTest, RepeatedSegmentsFromNonZeroStartTime) {
const uint64_t kSize = 100000;
uint64_t start_time = 0;
uint64_t duration = 100000;
int64_t start_time = 0;
int64_t duration = 100000;
uint64_t repeat = 2;
AddSegments(start_time, duration, kSize, repeat);
@ -625,8 +625,8 @@ TEST_F(SegmentTemplateTest, RepeatedSegmentsFromNonZeroStartTime) {
// Segments not starting from 0.
// Start time is 10. Make sure r gets set correctly.
TEST_F(SegmentTemplateTest, NonZeroStartTime) {
const uint64_t kStartTime = 10;
const uint64_t kDuration = 22000;
const int64_t kStartTime = 10;
const int64_t kDuration = 22000;
const uint64_t kSize = 123456;
const uint64_t kRepeat = 1;
AddSegments(kStartTime, kDuration, kSize, kRepeat);
@ -636,13 +636,13 @@ TEST_F(SegmentTemplateTest, NonZeroStartTime) {
// There is a gap in the segments, but still valid.
TEST_F(SegmentTemplateTest, NonContiguousLiveInfo) {
const uint64_t kStartTime = 10;
const uint64_t kDuration = 22000;
const int64_t kStartTime = 10;
const int64_t kDuration = 22000;
const uint64_t kSize = 123456;
const uint64_t kRepeat = 0;
AddSegments(kStartTime, kDuration, kSize, kRepeat);
const uint64_t kStartTimeOffset = 100;
const int64_t kStartTimeOffset = 100;
AddSegments(kDuration + kStartTimeOffset, kDuration, kSize, kRepeat);
EXPECT_THAT(representation_->GetXml().get(), XmlNodeEqual(ExpectedXml()));
@ -651,9 +651,9 @@ TEST_F(SegmentTemplateTest, NonContiguousLiveInfo) {
// Add segments out of order. Segments that start before the previous segment
// cannot be added.
TEST_F(SegmentTemplateTest, OutOfOrder) {
const uint64_t kEarlierStartTime = 0;
const uint64_t kLaterStartTime = 1000;
const uint64_t kDuration = 1000;
const int64_t kEarlierStartTime = 0;
const int64_t kLaterStartTime = 1000;
const int64_t kDuration = 1000;
const uint64_t kSize = 123456;
const uint64_t kRepeat = 0;
@ -665,12 +665,12 @@ TEST_F(SegmentTemplateTest, OutOfOrder) {
// No segments should be overlapping.
TEST_F(SegmentTemplateTest, OverlappingSegments) {
const uint64_t kEarlierStartTime = 0;
const uint64_t kDuration = 1000;
const int64_t kEarlierStartTime = 0;
const int64_t kDuration = 1000;
const uint64_t kSize = 123456;
const uint64_t kRepeat = 0;
const uint64_t kOverlappingSegmentStartTime = kDuration / 2;
const int64_t kOverlappingSegmentStartTime = kDuration / 2;
CHECK_GT(kDuration, kOverlappingSegmentStartTime);
AddSegments(kEarlierStartTime, kDuration, kSize, kRepeat);
@ -683,12 +683,12 @@ TEST_F(SegmentTemplateTest, OverlappingSegments) {
// in the range of rounding error defined inside MpdBuilder, the segment gets
// accepted.
TEST_F(SegmentTemplateTest, OverlappingSegmentsWithinErrorRange) {
const uint64_t kEarlierStartTime = 0;
const uint64_t kDuration = 1000;
const int64_t kEarlierStartTime = 0;
const int64_t kDuration = 1000;
const uint64_t kSize = 123456;
const uint64_t kRepeat = 0;
const uint64_t kOverlappingSegmentStartTime = kDuration - 1;
const int64_t kOverlappingSegmentStartTime = kDuration - 1;
CHECK_GT(kDuration, kOverlappingSegmentStartTime);
AddSegments(kEarlierStartTime, kDuration, kSize, kRepeat);
@ -770,8 +770,8 @@ class ApproximateSegmentTimelineTest : public SegmentTimelineTestBase,
};
TEST_P(ApproximateSegmentTimelineTest, SegmentDurationAdjusted) {
const uint64_t kStartTime = 0;
const uint64_t kDurationSmaller =
const int64_t kStartTime = 0;
const int64_t kDurationSmaller =
kScaledTargetSegmentDuration - kSampleDuration / 2;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDurationSmaller, kSize, 0);
@ -790,8 +790,8 @@ TEST_P(ApproximateSegmentTimelineTest, SegmentDurationAdjusted) {
TEST_P(ApproximateSegmentTimelineTest,
SegmentDurationAdjustedWithNonZeroStartTime) {
const uint64_t kStartTime = 12345;
const uint64_t kDurationSmaller =
const int64_t kStartTime = 12345;
const int64_t kDurationSmaller =
kScaledTargetSegmentDuration - kSampleDuration / 2;
const uint64_t kSize = 128;
@ -810,10 +810,10 @@ TEST_P(ApproximateSegmentTimelineTest,
}
TEST_P(ApproximateSegmentTimelineTest, SegmentsWithSimilarDurations) {
const uint64_t kStartTime = 0;
const uint64_t kDurationSmaller =
const int64_t kStartTime = 0;
const int64_t kDurationSmaller =
kScaledTargetSegmentDuration - kSampleDuration / 2;
const uint64_t kDurationLarger =
const int64_t kDurationLarger =
kScaledTargetSegmentDuration + kSampleDuration / 2;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDurationSmaller, kSize, 0);
@ -845,8 +845,8 @@ TEST_P(ApproximateSegmentTimelineTest, SegmentsWithSimilarDurations) {
// duration; if it is not the case (which should not happen with our demuxer),
// this is how the output would look like.
TEST_P(ApproximateSegmentTimelineTest, SegmentsWithSimilarDurations2) {
const uint64_t kStartTime = 0;
const uint64_t kDurationLarger =
const int64_t kStartTime = 0;
const int64_t kDurationLarger =
kScaledTargetSegmentDuration + kSampleDuration / 2;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDurationLarger, kSize, 0);
@ -868,9 +868,9 @@ TEST_P(ApproximateSegmentTimelineTest, SegmentsWithSimilarDurations2) {
}
TEST_P(ApproximateSegmentTimelineTest, FillSmallGap) {
const uint64_t kStartTime = 0;
const uint64_t kDuration = kScaledTargetSegmentDuration;
const uint64_t kGap = kSampleDuration / 2;
const int64_t kStartTime = 0;
const int64_t kDuration = kScaledTargetSegmentDuration;
const int64_t kGap = kSampleDuration / 2;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
AddSegments(kStartTime + kDuration + kGap, kDuration, kSize, 0);
@ -892,9 +892,9 @@ TEST_P(ApproximateSegmentTimelineTest, FillSmallGap) {
}
TEST_P(ApproximateSegmentTimelineTest, FillSmallOverlap) {
const uint64_t kStartTime = 0;
const uint64_t kDuration = kScaledTargetSegmentDuration;
const uint64_t kOverlap = kSampleDuration / 2;
const int64_t kStartTime = 0;
const int64_t kDuration = kScaledTargetSegmentDuration;
const int64_t kOverlap = kSampleDuration / 2;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
AddSegments(kStartTime + kDuration - kOverlap, kDuration, kSize, 0);
@ -931,8 +931,8 @@ TEST_P(ApproximateSegmentTimelineTest, NoSampleDuration) {
kAnyRepresentationId, NoListener());
ASSERT_TRUE(representation_->Init());
const uint64_t kStartTime = 0;
const uint64_t kDuration = kScaledTargetSegmentDuration;
const int64_t kStartTime = 0;
const int64_t kDuration = kScaledTargetSegmentDuration;
const uint64_t kSize = 128;
AddSegments(kStartTime, kDuration, kSize, 0);
AddSegments(kStartTime + kDuration, kDuration, kSize, 0);
@ -956,7 +956,7 @@ INSTANTIATE_TEST_CASE_P(ApproximateSegmentTimelineTest,
Bool());
class TimeShiftBufferDepthTest : public SegmentTimelineTestBase,
public WithParamInterface<uint64_t> {
public WithParamInterface<int64_t> {
public:
void SetUp() override {
initial_start_time_ = GetParam();
@ -966,7 +966,7 @@ class TimeShiftBufferDepthTest : public SegmentTimelineTestBase,
MpdOptions* mutable_mpd_options() { return &mpd_options_; }
protected:
uint64_t initial_start_time_;
int64_t initial_start_time_;
};
// All segments have the same duration and size.
@ -976,7 +976,7 @@ TEST_P(TimeShiftBufferDepthTest, Normal) {
kTimeShiftBufferDepth;
// Trick to make every segment 1 second long.
const uint64_t kDuration = kDefaultTimeScale;
const int64_t kDuration = kDefaultTimeScale;
const uint64_t kSize = 10000;
const uint64_t kRepeat = 1234;
const uint64_t kLength = kRepeat;
@ -1013,7 +1013,7 @@ TEST_P(TimeShiftBufferDepthTest, TimeShiftBufferDepthShorterThanSegmentLength) {
kTimeShiftBufferDepth;
// Each duration is a second longer than timeShiftBufferDepth.
const uint64_t kDuration = kDefaultTimeScale * (kTimeShiftBufferDepth + 1);
const int64_t kDuration = kDefaultTimeScale * (kTimeShiftBufferDepth + 1);
const uint64_t kSize = 10000;
const uint64_t kRepeat = 1;
@ -1032,18 +1032,18 @@ TEST_P(TimeShiftBufferDepthTest, Generic) {
mutable_mpd_options()->mpd_params.time_shift_buffer_depth =
kTimeShiftBufferDepth;
const uint64_t kDuration = kDefaultTimeScale;
const int64_t kDuration = kDefaultTimeScale;
const uint64_t kSize = 10000;
const uint64_t kRepeat = 1000;
AddSegments(initial_start_time_, kDuration, kSize, kRepeat);
const uint64_t first_s_element_end_time =
const int64_t first_s_element_end_time =
initial_start_time_ + kDuration * (kRepeat + 1);
// Now add 2 kTimeShiftBufferDepth long segments.
const int kNumMoreSegments = 2;
const int kMoreSegmentsRepeat = kNumMoreSegments - 1;
const uint64_t kTimeShiftBufferDepthDuration =
const int64_t kTimeShiftBufferDepthDuration =
kDefaultTimeScale * kTimeShiftBufferDepth;
AddSegments(first_s_element_end_time, kTimeShiftBufferDepthDuration, kSize,
kMoreSegmentsRepeat);
@ -1073,14 +1073,14 @@ TEST_P(TimeShiftBufferDepthTest, MoreThanOneS) {
const uint64_t kSize = 20000;
const uint64_t kOneSecondDuration = kDefaultTimeScale;
const int64_t kOneSecondDuration = kDefaultTimeScale;
const uint64_t kOneSecondSegmentRepeat = 99;
AddSegments(initial_start_time_, kOneSecondDuration, kSize,
kOneSecondSegmentRepeat);
const uint64_t first_s_element_end_time =
const int64_t first_s_element_end_time =
initial_start_time_ + kOneSecondDuration * (kOneSecondSegmentRepeat + 1);
const uint64_t kTwoSecondDuration = 2 * kDefaultTimeScale;
const int64_t kTwoSecondDuration = 2 * kDefaultTimeScale;
const uint64_t kTwoSecondSegmentRepeat = 20;
AddSegments(first_s_element_end_time, kTwoSecondDuration, kSize,
kTwoSecondSegmentRepeat);
@ -1116,16 +1116,16 @@ TEST_P(TimeShiftBufferDepthTest, UseLastSegmentInS) {
mutable_mpd_options()->mpd_params.time_shift_buffer_depth =
kTimeShiftBufferDepth;
const uint64_t kDuration1 = static_cast<uint64_t>(kDefaultTimeScale * 1.5);
const int64_t kDuration1 = static_cast<int64_t>(kDefaultTimeScale * 1.5);
const uint64_t kSize = 20000;
const uint64_t kRepeat1 = 1;
AddSegments(initial_start_time_, kDuration1, kSize, kRepeat1);
const uint64_t first_s_element_end_time =
const int64_t first_s_element_end_time =
initial_start_time_ + kDuration1 * (kRepeat1 + 1);
const uint64_t kTwoSecondDuration = 2 * kDefaultTimeScale;
const int64_t kTwoSecondDuration = 2 * kDefaultTimeScale;
const uint64_t kTwoSecondSegmentRepeat = 4;
AddSegments(first_s_element_end_time, kTwoSecondDuration, kSize,
@ -1149,7 +1149,7 @@ TEST_P(TimeShiftBufferDepthTest, NormalGap) {
mutable_mpd_options()->mpd_params.time_shift_buffer_depth =
kTimeShiftBufferDepth;
const uint64_t kDuration = kDefaultTimeScale;
const int64_t kDuration = kDefaultTimeScale;
const uint64_t kSize = 20000;
const uint64_t kRepeat = 6;
// CHECK here so that the when next S element is added with 1 segment, this S
@ -1159,10 +1159,10 @@ TEST_P(TimeShiftBufferDepthTest, NormalGap) {
AddSegments(initial_start_time_, kDuration, kSize, kRepeat);
const uint64_t first_s_element_end_time =
const int64_t first_s_element_end_time =
initial_start_time_ + kDuration * (kRepeat + 1);
const uint64_t gap_s_element_start_time = first_s_element_end_time + 1;
const int64_t gap_s_element_start_time = first_s_element_end_time + 1;
AddSegments(gap_s_element_start_time, kDuration, kSize, /* no repeat */ 0);
std::string expected_s_element = base::StringPrintf(
@ -1181,21 +1181,21 @@ TEST_P(TimeShiftBufferDepthTest, HugeGap) {
mutable_mpd_options()->mpd_params.time_shift_buffer_depth =
kTimeShiftBufferDepth;
const uint64_t kDuration = kDefaultTimeScale;
const int64_t kDuration = kDefaultTimeScale;
const uint64_t kSize = 20000;
const uint64_t kRepeat = 6;
AddSegments(initial_start_time_, kDuration, kSize, kRepeat);
const uint64_t first_s_element_end_time =
const int64_t first_s_element_end_time =
initial_start_time_ + kDuration * (kRepeat + 1);
// Big enough gap so first S element should not be there.
const uint64_t gap_s_element_start_time =
const int64_t gap_s_element_start_time =
first_s_element_end_time +
(kTimeShiftBufferDepth + 1) * kDefaultTimeScale;
const uint64_t kSecondSElementRepeat = 9;
static_assert(
kSecondSElementRepeat < static_cast<uint64_t>(kTimeShiftBufferDepth),
kSecondSElementRepeat < static_cast<int64_t>(kTimeShiftBufferDepth),
"second_s_element_repeat_must_be_less_than_time_shift_buffer_depth");
AddSegments(gap_s_element_start_time, kDuration, kSize,
kSecondSElementRepeat);
@ -1216,7 +1216,7 @@ TEST_P(TimeShiftBufferDepthTest, ManySegments) {
mutable_mpd_options()->mpd_params.time_shift_buffer_depth =
kTimeShiftBufferDepth;
const uint64_t kDuration = kDefaultTimeScale;
const int64_t kDuration = kDefaultTimeScale;
const uint64_t kSize = 20000;
const uint64_t kRepeat = 10000;
const uint64_t kTotalNumSegments = kRepeat + 1;
@ -1252,8 +1252,8 @@ const char kSegmentTemplate[] = "memory://$Number$.mp4";
const char kSegmentTemplateUrl[] = "video/$Number$.mp4";
const char kStringPrintTemplate[] = "memory://%d.mp4";
const uint64_t kInitialStartTime = 0;
const uint64_t kDuration = kDefaultTimeScale;
const int64_t kInitialStartTime = 0;
const int64_t kDuration = kDefaultTimeScale;
const uint64_t kSize = 10;
const uint64_t kNoRepeat = 0;
} // namespace

View File

@ -12,8 +12,8 @@ namespace shaka {
/// Used for keeping track of all the segments used for generating MPD with
/// dynamic profile.
struct SegmentInfo {
uint64_t start_time;
uint64_t duration;
int64_t start_time;
int64_t duration;
// This is the number of times same duration segments are repeated not
// inclusive. In other words if this is the only one segment that starts at
// |start_time| and has |duration| but none others have |start_time| * N and

View File

@ -60,7 +60,7 @@ bool IsTimelineConstantDuration(const std::list<SegmentInfo>& segment_infos,
if (last_segment.repeat != 0)
return false;
const uint64_t expected_last_segment_start_time =
const int64_t expected_last_segment_start_time =
first_segment.start_time +
first_segment.duration * (first_segment.repeat + 1);
return expected_last_segment_start_time == last_segment.start_time;
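A worked example (not from the commit) of this check together with the SegmentInfo repeat convention used throughout the MPD code: |repeat| counts the additional same-duration segments after the first, so one entry spans duration * (repeat + 1) ticks, and the timeline is constant-duration when the last entry starts exactly where the first run ends.

#include <cassert>
#include <cstdint>

// Mirrors SegmentInfo: |repeat| counts the additional segments, not the first.
struct ExampleSegmentInfo {
  int64_t start_time;
  int64_t duration;
  uint64_t repeat;
};

int main() {
  // First entry covers three 1000-tick segments: [0,1000), [1000,2000), [2000,3000).
  const ExampleSegmentInfo first = {0, 1000, 2};
  // Last entry is a single segment starting exactly where the first run ends.
  const ExampleSegmentInfo last = {3000, 1000, 0};

  const int64_t expected_last_segment_start_time =
      first.start_time +
      first.duration * static_cast<int64_t>(first.repeat + 1);
  assert(expected_last_segment_start_time == 3000);

  // No extra repeats on the last entry and a contiguous start time, so the
  // whole timeline can be expressed with one constant segment duration.
  assert(last.repeat == 0 && expected_last_segment_start_time == last.start_time);
  return 0;
}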