Make auto_readahead_size default true
Summary: Make auto_readahead_size option default true

Test Plan: benchmarks and existing tests

Reviewers:

Subscribers:

Tasks:

Tags:
akankshamahajan15 committed Dec 14, 2023
1 parent d926593 commit d620813
Showing 13 changed files with 39 additions and 507 deletions.
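Before the diff itself, a quick usage note: ReadOptions::auto_readahead_size works together with ReadOptions::iterate_upper_bound to trim readahead during iteration, and with this commit it is on by default. A minimal sketch of opting back out (assumes the standard public header; not part of this diff):

#include "rocksdb/options.h"

// Sketch only: after this commit auto_readahead_size defaults to true, so a
// caller that wants the previous behavior has to disable it explicitly.
rocksdb::ReadOptions MakeReadOptionsWithoutAutoReadahead() {
  rocksdb::ReadOptions read_options;
  read_options.auto_readahead_size = false;  // opt out of the new default
  return read_options;
}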
22 changes: 10 additions & 12 deletions file/file_prefetch_buffer.cc
@@ -356,6 +356,7 @@ void FilePrefetchBuffer::ReadAheadSizeTuning(
uint64_t updated_end_offset =
Roundup(start_offset + length + readahead_size, alignment);
uint64_t initial_end_offset = updated_end_offset;
uint64_t initial_start_offset = updated_start_offset;

// Callback to tune the start and end offsets.
if (readaheadsize_cb_ != nullptr && readahead_size > 0) {
@@ -365,6 +366,8 @@ void FilePrefetchBuffer::ReadAheadSizeTuning(

// read_len will be 0 and there is nothing to read/prefetch.
if (updated_start_offset == updated_end_offset) {
UpdateReadAheadTrimmedStat((initial_end_offset - initial_start_offset),
(updated_end_offset - updated_start_offset));
return;
}

@@ -377,6 +380,8 @@ void FilePrefetchBuffer::ReadAheadSizeTuning(
// means data has been already prefetched.
if (updated_end_offset <= prev_buf_end_offset) {
start_offset = end_offset = prev_buf_end_offset;
UpdateReadAheadTrimmedStat((initial_end_offset - initial_start_offset),
(end_offset - start_offset));
return;
}
}
@@ -404,6 +409,9 @@ void FilePrefetchBuffer::ReadAheadSizeTuning(
// offset of next prefetch.
bufs_[index].initial_end_offset_ = initial_end_offset;
read_len = static_cast<size_t>(roundup_len - chunk_len);

UpdateReadAheadTrimmedStat((initial_end_offset - initial_start_offset),
(end_offset - start_offset));
}

Status FilePrefetchBuffer::HandleOverlappingData(
@@ -449,8 +457,7 @@ Status FilePrefetchBuffer::HandleOverlappingData(
uint64_t start_offset = bufs_[second].initial_end_offset_;
// Second buffer might be out of bound if first buffer already prefetched
// that data.
if (tmp_offset + tmp_length <= bufs_[second].offset_ + second_size &&
!IsOffsetOutOfBound(start_offset)) {
if (tmp_offset + tmp_length <= bufs_[second].offset_ + second_size) {
size_t read_len = 0;
uint64_t end_offset = start_offset, chunk_len = 0;

@@ -635,9 +642,6 @@ Status FilePrefetchBuffer::PrefetchAsyncInternal(const IOOptions& opts,
// prefetching.
uint64_t start_offset2 = bufs_[curr_].initial_end_offset_;

// Second buffer might be out of bound if first buffer already prefetched
// that data.
if (!IsOffsetOutOfBound(start_offset2)) {
// Find updated readahead size after tuning
size_t read_len2 = 0;
uint64_t end_offset2 = start_offset2, chunk_len2 = 0;
@@ -653,7 +657,6 @@ Status FilePrefetchBuffer::PrefetchAsyncInternal(const IOOptions& opts,
bufs_[second].ClearBuffer();
return s;
}
}
}
}

@@ -737,7 +740,6 @@ bool FilePrefetchBuffer::TryReadFromCacheUntracked(
return false;
}
}
UpdateReadAheadSizeForUpperBound(offset, n);
s = Prefetch(opts, reader, offset, n + readahead_size_);
}
if (!s.ok()) {
@@ -837,8 +839,6 @@ bool FilePrefetchBuffer::TryReadFromCacheAsyncUntracked(
}
}

UpdateReadAheadSizeForUpperBound(offset, n);

// Prefetch n + readahead_size_/2 synchronously as remaining
// readahead_size_/2 will be prefetched asynchronously.
s = PrefetchAsyncInternal(opts, reader, offset, n, readahead_size_ / 2,
@@ -919,7 +919,6 @@ Status FilePrefetchBuffer::PrefetchAsync(const IOOptions& opts,
explicit_prefetch_submitted_ = false;
bool is_eligible_for_prefetching = false;

UpdateReadAheadSizeForUpperBound(offset, n);
if (readahead_size_ > 0 &&
(!implicit_auto_readahead_ ||
num_file_reads_ >= num_file_reads_for_auto_readahead_)) {
@@ -1014,14 +1013,13 @@ Status FilePrefetchBuffer::PrefetchAsync(const IOOptions& opts,
start_offset2 = bufs_[curr_].initial_end_offset_;
// Second buffer might be out of bound if first buffer already prefetched
// that data.
if (!IsOffsetOutOfBound(start_offset2)) {

uint64_t end_offset2 = start_offset2, chunk_len2 = 0;
ReadAheadSizeTuning(/*read_curr_block=*/false, /*refit_tail=*/false,
/*prev_buf_end_offset=*/end_offset1, second,
alignment,
/*length=*/0, readahead_size, start_offset2,
end_offset2, read_len2, chunk_len2);
}
}

if (read_len1) {
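For context on the stat change above: READAHEAD_TRIMMED is no longer recorded in the removed UpdateReadAheadSizeForUpperBound path but inside ReadAheadSizeTuning, whenever the tuned window ends up a different length than the initially computed one. A hedged sketch of reading that ticker from application code (assumes statistics were enabled on the DB; getTickerCount and CreateDBStatistics are the standard statistics APIs, not part of this diff):

#include <cstdint>
#include <iostream>

#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

// Sketch: report how often readahead was trimmed during an iterator workload.
void ReportReadaheadTrimmed(const rocksdb::Options& options) {
  if (options.statistics == nullptr) {
    return;  // statistics were not enabled for this DB
  }
  uint64_t trimmed =
      options.statistics->getTickerCount(rocksdb::READAHEAD_TRIMMED);
  std::cout << "READAHEAD_TRIMMED ticks: " << trimmed << std::endl;
}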
39 changes: 8 additions & 31 deletions file/file_prefetch_buffer.h
@@ -105,8 +105,7 @@ class FilePrefetchBuffer {
size_t readahead_size = 0, size_t max_readahead_size = 0,
bool enable = true, bool track_min_offset = false,
bool implicit_auto_readahead = false, uint64_t num_file_reads = 0,
uint64_t num_file_reads_for_auto_readahead = 0,
uint64_t upper_bound_offset = 0, FileSystem* fs = nullptr,
uint64_t num_file_reads_for_auto_readahead = 0, FileSystem* fs = nullptr,
SystemClock* clock = nullptr, Statistics* stats = nullptr,
const std::function<void(bool, uint64_t&, uint64_t&)>& cb = nullptr,
FilePrefetchBufferUsage usage = FilePrefetchBufferUsage::kUnknown)
@@ -127,7 +126,6 @@
clock_(clock),
stats_(stats),
usage_(usage),
upper_bound_offset_(upper_bound_offset),
readaheadsize_cb_(cb) {
assert((num_file_reads_ >= num_file_reads_for_auto_readahead_ + 1) ||
(num_file_reads_ == 0));
@@ -296,11 +294,6 @@ class FilePrefetchBuffer {
// Callback function passed to underlying FS in case of asynchronous reads.
void PrefetchAsyncCallback(const FSReadRequest& req, void* cb_arg);

void ResetUpperBoundOffset(uint64_t upper_bound_offset) {
upper_bound_offset_ = upper_bound_offset;
readahead_size_ = initial_auto_readahead_size_;
}

void TEST_GetBufferOffsetandSize(uint32_t index, uint64_t& offset,
size_t& len) {
offset = bufs_[index].offset_;
@@ -452,25 +445,6 @@ class FilePrefetchBuffer {
uint64_t offset, size_t n, Slice* result,
Status* status);

void UpdateReadAheadSizeForUpperBound(uint64_t offset, size_t n) {
// Adjust readhahead_size till upper_bound if upper_bound_offset_ is
// set.
if (readahead_size_ > 0 && upper_bound_offset_ > 0 &&
upper_bound_offset_ > offset) {
if (upper_bound_offset_ < offset + n + readahead_size_) {
readahead_size_ = (upper_bound_offset_ - offset) - n;
RecordTick(stats_, READAHEAD_TRIMMED);
}
}
}

inline bool IsOffsetOutOfBound(uint64_t offset) {
if (upper_bound_offset_ > 0) {
return (offset >= upper_bound_offset_);
}
return false;
}

void ReadAheadSizeTuning(bool read_curr_block, bool refit_tail,
uint64_t prev_buf_end_offset, uint32_t index,
size_t alignment, size_t length,
@@ -487,6 +461,13 @@ class FilePrefetchBuffer {
}
}

void UpdateReadAheadTrimmedStat(size_t initial_length,
size_t updated_length) {
if (initial_length != updated_length) {
RecordTick(stats_, READAHEAD_TRIMMED);
}
}

std::vector<BufferInfo> bufs_;
// curr_ represents the index for bufs_ indicating which buffer is being
// consumed currently.
@@ -529,10 +510,6 @@

FilePrefetchBufferUsage usage_;

// upper_bound_offset_ is set when ReadOptions.iterate_upper_bound and
// ReadOptions.auto_readahead_size are set to trim readahead_size upto
// upper_bound_offset_ during prefetching.
uint64_t upper_bound_offset_ = 0;
std::function<void(bool, uint64_t&, uint64_t&)> readaheadsize_cb_;
};
} // namespace ROCKSDB_NAMESPACE
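With upper_bound_offset_ removed from FilePrefetchBuffer, any upper-bound trimming now has to arrive through the readaheadsize_cb_ callback supplied at construction (the std::function<void(bool, uint64_t&, uint64_t&)> parameter in the constructor above). A hypothetical sketch of such a callback; the factory name and the clamping policy are illustrative, not taken from this diff:

#include <algorithm>
#include <cstdint>
#include <functional>

// Illustrative only: build a tuning callback that clamps the prefetch window
// end to a caller-known bound, matching the callback type FilePrefetchBuffer
// accepts.
std::function<void(bool, uint64_t&, uint64_t&)> MakeReadaheadTuner(
    uint64_t upper_bound_offset) {
  return [upper_bound_offset](bool /*read_curr_block*/, uint64_t& start_offset,
                              uint64_t& end_offset) {
    if (upper_bound_offset > 0 && end_offset > upper_bound_offset) {
      // Never trim below the start; a zero-length window means there is
      // nothing to prefetch, which ReadAheadSizeTuning() handles by returning
      // early (and now also records as READAHEAD_TRIMMED).
      end_offset = std::max(start_offset, upper_bound_offset);
    }
  };
}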