2015-03-13 00:54:12 +00:00
|
|
|
// Copyright 2015 Google Inc. All rights reserved.
|
|
|
|
//
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file or at
|
|
|
|
// https://developers.google.com/open-source/licenses/bsd
|
|
|
|
|
2017-07-10 18:26:22 +00:00
|
|
|
#include "packager/file/io_cache.h"
|
2015-03-13 00:54:12 +00:00
|
|
|
|
|
|
|
#include <string.h>
|
|
|
|
|
|
|
|
#include <algorithm>
|
|
|
|
|
2015-08-20 19:06:02 +00:00
|
|
|
#include "packager/base/logging.h"
|
2015-03-13 00:54:12 +00:00
|
|
|
|
2016-05-20 21:19:33 +00:00
|
|
|
namespace shaka {
|
2015-03-13 00:54:12 +00:00
|
|
|
|
|
|
|
using base::AutoLock;
|
|
|
|
using base::AutoUnlock;
|
|
|
|
|
|
|
|
// Constructs a cache that can hold up to |cache_size| bytes. Both events use
// AUTOMATIC reset so a Signal() wakes exactly one waiter and re-arms itself.
IoCache::IoCache(uint64_t cache_size)
    : cache_size_(cache_size),
      read_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                  base::WaitableEvent::InitialState::NOT_SIGNALED),
      write_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC,
                   base::WaitableEvent::InitialState::NOT_SIGNALED),
      // Make the buffer one byte larger than the cache so that the
      // condition r_ptr == w_ptr is unambiguous (buffer empty).
      circular_buffer_(cache_size + 1),
      // Use data() consistently (rather than &circular_buffer_[0]) to match
      // the pointer initializations below and the resets in Clear()/Reopen().
      end_ptr_(circular_buffer_.data() + cache_size + 1),
      r_ptr_(circular_buffer_.data()),
      w_ptr_(circular_buffer_.data()),
      closed_(false) {}
|
|
|
|
|
|
|
|
IoCache::~IoCache() {
  // Close() unblocks any thread still waiting in Read()/Write() before the
  // members (lock, events, buffer) are destroyed.
  Close();
}
|
|
|
|
|
2015-03-18 21:35:25 +00:00
|
|
|
// Reads up to |size| bytes from the cache into |buffer|. Blocks until at
// least one byte is available or the cache is closed. Returns the number of
// bytes actually read; 0 indicates the cache is closed and empty.
uint64_t IoCache::Read(void* buffer, uint64_t size) {
  DCHECK(buffer);

  AutoLock lock(lock_);
  // Block until a writer produces data or the cache is closed. The lock is
  // dropped while waiting so Write() can make progress.
  while (!closed_ && (BytesCachedInternal() == 0)) {
    AutoUnlock unlock(lock_);
    write_event_.Wait();
  }

  // Clamp to what is actually cached (0 when closed and drained).
  size = std::min(size, BytesCachedInternal());
  // First chunk: from the read pointer up to the physical end of the buffer.
  uint64_t first_chunk_size(
      std::min(size, static_cast<uint64_t>(end_ptr_ - r_ptr_)));
  memcpy(buffer, r_ptr_, first_chunk_size);
  r_ptr_ += first_chunk_size;
  DCHECK_GE(end_ptr_, r_ptr_);
  if (r_ptr_ == end_ptr_)
    r_ptr_ = &circular_buffer_[0];  // Wrap to the start of the buffer.
  // Second chunk: the remainder, copied after wrapping around.
  uint64_t second_chunk_size(size - first_chunk_size);
  if (second_chunk_size) {
    memcpy(static_cast<uint8_t*>(buffer) + first_chunk_size, r_ptr_,
           second_chunk_size);
    r_ptr_ += second_chunk_size;
    DCHECK_GT(end_ptr_, r_ptr_);
  }
  // Wake a writer that may be blocked on a full cache.
  read_event_.Signal();
  return size;
}
|
|
|
|
|
2015-03-18 21:35:25 +00:00
|
|
|
// Writes |size| bytes from |buffer| into the cache, blocking whenever the
// cache is full until a reader frees space. Returns |size| on success, or 0
// if the cache was closed before all bytes were written.
uint64_t IoCache::Write(const void* buffer, uint64_t size) {
  DCHECK(buffer);

  const uint8_t* r_ptr(static_cast<const uint8_t*>(buffer));
  uint64_t bytes_left(size);
  while (bytes_left) {
    AutoLock lock(lock_);
    // Block until there is free space or the cache is closed; the lock is
    // dropped while waiting so Read() can drain the cache.
    while (!closed_ && (BytesFreeInternal() == 0)) {
      AutoUnlock unlock(lock_);
      VLOG(1) << "Circular buffer is full, which can happen if data arrives "
                 "faster than being consumed by packager. Ignore if it is not "
                 "live packaging. Otherwise, try increasing --io_cache_size.";
      read_event_.Wait();
    }
    // NOTE: if closed mid-write this returns 0, not the count of bytes
    // already committed; callers treat 0 as write failure.
    if (closed_)
      return 0;

    uint64_t write_size(std::min(bytes_left, BytesFreeInternal()));
    // First chunk: from the write pointer up to the physical buffer end.
    uint64_t first_chunk_size(
        std::min(write_size, static_cast<uint64_t>(end_ptr_ - w_ptr_)));
    memcpy(w_ptr_, r_ptr, first_chunk_size);
    w_ptr_ += first_chunk_size;
    DCHECK_GE(end_ptr_, w_ptr_);
    if (w_ptr_ == end_ptr_)
      w_ptr_ = &circular_buffer_[0];  // Wrap to the start of the buffer.
    r_ptr += first_chunk_size;
    // Second chunk: the remainder, written after wrapping around.
    uint64_t second_chunk_size(write_size - first_chunk_size);
    if (second_chunk_size) {
      memcpy(w_ptr_, r_ptr, second_chunk_size);
      w_ptr_ += second_chunk_size;
      DCHECK_GT(end_ptr_, w_ptr_);
      r_ptr += second_chunk_size;
    }
    bytes_left -= write_size;
    // Wake a reader that may be blocked on an empty cache.
    write_event_.Signal();
  }
  return size;
}
|
|
|
|
|
|
|
|
void IoCache::Clear() {
|
|
|
|
AutoLock lock(lock_);
|
2016-01-29 21:23:12 +00:00
|
|
|
r_ptr_ = w_ptr_ = circular_buffer_.data();
|
2015-03-13 00:54:12 +00:00
|
|
|
// Let any writers know that there is room in the cache.
|
|
|
|
read_event_.Signal();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Marks the cache closed and wakes both sides so any blocked Read()/Write()
// call can observe closed_ and return.
void IoCache::Close() {
  AutoLock auto_lock(lock_);
  closed_ = true;
  read_event_.Signal();
  write_event_.Signal();
}
|
|
|
|
|
2015-10-16 20:10:42 +00:00
|
|
|
void IoCache::Reopen() {
|
|
|
|
AutoLock lock(lock_);
|
|
|
|
CHECK(closed_);
|
2016-01-29 21:23:12 +00:00
|
|
|
r_ptr_ = w_ptr_ = circular_buffer_.data();
|
2015-10-16 20:10:42 +00:00
|
|
|
closed_ = false;
|
|
|
|
read_event_.Reset();
|
|
|
|
write_event_.Reset();
|
|
|
|
}
|
|
|
|
|
2015-03-13 00:54:12 +00:00
|
|
|
uint64_t IoCache::BytesCached() {
|
|
|
|
AutoLock lock(lock_);
|
|
|
|
return BytesCachedInternal();
|
|
|
|
}
|
|
|
|
|
|
|
|
uint64_t IoCache::BytesFree() {
|
|
|
|
AutoLock lock(lock_);
|
|
|
|
return BytesFreeInternal();
|
|
|
|
}
|
|
|
|
|
2015-03-24 22:29:55 +00:00
|
|
|
uint64_t IoCache::BytesCachedInternal() {
|
2016-01-29 21:23:12 +00:00
|
|
|
return (r_ptr_ <= w_ptr_)
|
|
|
|
? w_ptr_ - r_ptr_
|
|
|
|
: (end_ptr_ - r_ptr_) + (w_ptr_ - circular_buffer_.data());
|
2015-03-13 00:54:12 +00:00
|
|
|
}
|
|
|
|
|
2015-03-24 22:29:55 +00:00
|
|
|
uint64_t IoCache::BytesFreeInternal() {
  // Callers in this file hold lock_ before calling. Usable capacity is
  // cache_size_; the underlying buffer has one extra byte so that full and
  // empty states are distinguishable.
  return cache_size_ - BytesCachedInternal();
}
|
|
|
|
|
2015-03-19 18:28:04 +00:00
|
|
|
// Blocks the caller until every cached byte has been consumed by Read() (each
// Read() signals read_event_) or until the cache is closed.
void IoCache::WaitUntilEmptyOrClosed() {
  AutoLock lock(lock_);
  while (!closed_ && BytesCachedInternal()) {
    // Drop the lock while waiting so readers can drain the cache.
    AutoUnlock unlock(lock_);
    read_event_.Wait();
  }
}
|
|
|
|
|
2016-05-20 21:19:33 +00:00
|
|
|
} // namespace shaka
|