From 79174c196f5be6326ae812012a92d1fcd448d75c Mon Sep 17 00:00:00 2001
From: akankshamahajan
Date: Thu, 14 Dec 2023 17:04:03 -0800
Subject: [PATCH] Format code

Summary:

Test Plan:

Reviewers:

Subscribers:

Tasks:

Tags:
---
 file/file_prefetch_buffer.cc                 | 166 ++++++++++---------
 table/block_based/block_based_table_reader.h |  10 +-
 2 files changed, 88 insertions(+), 88 deletions(-)

diff --git a/file/file_prefetch_buffer.cc b/file/file_prefetch_buffer.cc
index fdb6537fd0e7..e8c4db45e09a 100644
--- a/file/file_prefetch_buffer.cc
+++ b/file/file_prefetch_buffer.cc
@@ -657,99 +657,101 @@ Status FilePrefetchBuffer::PrefetchInternal(const IOOptions& opts,
     CopyDataToBuffer(buf, offset, length);
   }
   return s;
-  }
+}
 
-  bool FilePrefetchBuffer::TryReadFromCache(
-      const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
-      size_t n, Slice* result, Status* status, bool for_compaction) {
-    bool ret = TryReadFromCacheUntracked(opts, reader, offset, n, result,
-                                         status, for_compaction);
-    if (usage_ == FilePrefetchBufferUsage::kTableOpenPrefetchTail && enable_) {
-      if (ret) {
-        RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_HIT);
-      } else {
-        RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_MISS);
-      }
+bool FilePrefetchBuffer::TryReadFromCache(const IOOptions& opts,
+                                          RandomAccessFileReader* reader,
+                                          uint64_t offset, size_t n,
+                                          Slice* result, Status* status,
+                                          bool for_compaction) {
+  bool ret = TryReadFromCacheUntracked(opts, reader, offset, n, result, status,
+                                       for_compaction);
+  if (usage_ == FilePrefetchBufferUsage::kTableOpenPrefetchTail && enable_) {
+    if (ret) {
+      RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_HIT);
+    } else {
+      RecordTick(stats_, TABLE_OPEN_PREFETCH_TAIL_MISS);
     }
-    return ret;
   }
+  return ret;
+}
 
-  bool FilePrefetchBuffer::TryReadFromCacheUntracked(
-      const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
-      size_t n, Slice* result, Status* status, bool for_compaction) {
-    if (track_min_offset_ && offset < min_offset_read_) {
-      min_offset_read_ = static_cast<size_t>(offset);
-    }
-
-    if (!enable_) {
-      return false;
-    }
-
-    if (explicit_prefetch_submitted_) {
-      // explicit_prefetch_submitted_ is special case where it expects request
-      // submitted in PrefetchAsync should match with this request. Otherwise
-      // buffers will be outdated.
-      // Random offset called. So abort the IOs.
-      if (prev_offset_ != offset) {
-        AbortAllIOs();
-        FreeAllBuffers();
-        explicit_prefetch_submitted_ = false;
-        return false;
-      }
-    }
+bool FilePrefetchBuffer::TryReadFromCacheUntracked(
+    const IOOptions& opts, RandomAccessFileReader* reader, uint64_t offset,
+    size_t n, Slice* result, Status* status, bool for_compaction) {
+  if (track_min_offset_ && offset < min_offset_read_) {
+    min_offset_read_ = static_cast<size_t>(offset);
+  }
 
-    AllocateBufferIfEmpty();
-    BufferInfo* buf = GetFirstBuffer();
+  if (!enable_) {
+    return false;
+  }
 
-    if (!explicit_prefetch_submitted_ && offset < buf->offset_) {
+  if (explicit_prefetch_submitted_) {
+    // explicit_prefetch_submitted_ is special case where it expects request
+    // submitted in PrefetchAsync should match with this request. Otherwise
+    // buffers will be outdated.
+    // Random offset called. So abort the IOs.
+    if (prev_offset_ != offset) {
+      AbortAllIOs();
+      FreeAllBuffers();
+      explicit_prefetch_submitted_ = false;
      return false;
    }
+  }
 
-    bool prefetched = false;
-    bool copy_to_overlap_buffer = false;
-    // If the buffer contains only a few of the requested bytes:
-    //   If readahead is enabled: prefetch the remaining bytes + readahead
-    //   bytes
-    //     and satisfy the request.
-    //   If readahead is not enabled: return false.
-    TEST_SYNC_POINT_CALLBACK("FilePrefetchBuffer::TryReadFromCache",
-                             &readahead_size_);
-
-    if (explicit_prefetch_submitted_ ||
-        (buf->async_read_in_progress_ ||
-         offset + n > buf->offset_ + buf->CurrentSize())) {
-      // In case readahead_size is trimmed (=0), we still want to poll the data
-      // submitted with explicit_prefetch_submitted_=true.
-      if (readahead_size_ > 0 || explicit_prefetch_submitted_) {
-        Status s;
-        assert(reader != nullptr);
-        assert(max_readahead_size_ >= readahead_size_);
-
-        if (for_compaction) {
-          s = Prefetch(opts, reader, offset, std::max(n, readahead_size_));
-        } else {
-          if (implicit_auto_readahead_) {
-            if (!IsEligibleForPrefetch(offset, n)) {
-              // Ignore status as Prefetch is not called.
-              s.PermitUncheckedError();
-              return false;
-            }
-          }
+  AllocateBufferIfEmpty();
+  BufferInfo* buf = GetFirstBuffer();
 
-          // Prefetch n + readahead_size_/2 synchronously as remaining
-          // readahead_size_/2 will be prefetched asynchronously if num_buffers_
-          // > 1.
-          s = PrefetchInternal(
-              opts, reader, offset, n,
-              (num_buffers_ > 1 ? readahead_size_ / 2 : readahead_size_),
-              copy_to_overlap_buffer);
-          explicit_prefetch_submitted_ = false;
+  if (!explicit_prefetch_submitted_ && offset < buf->offset_) {
+    return false;
+  }
+
+  bool prefetched = false;
+  bool copy_to_overlap_buffer = false;
+  // If the buffer contains only a few of the requested bytes:
+  //   If readahead is enabled: prefetch the remaining bytes + readahead
+  //   bytes
+  //     and satisfy the request.
+  //   If readahead is not enabled: return false.
+  TEST_SYNC_POINT_CALLBACK("FilePrefetchBuffer::TryReadFromCache",
+                           &readahead_size_);
+
+  if (explicit_prefetch_submitted_ ||
+      (buf->async_read_in_progress_ ||
+       offset + n > buf->offset_ + buf->CurrentSize())) {
+    // In case readahead_size is trimmed (=0), we still want to poll the data
+    // submitted with explicit_prefetch_submitted_=true.
+    if (readahead_size_ > 0 || explicit_prefetch_submitted_) {
+      Status s;
+      assert(reader != nullptr);
+      assert(max_readahead_size_ >= readahead_size_);
+
+      if (for_compaction) {
+        s = Prefetch(opts, reader, offset, std::max(n, readahead_size_));
+      } else {
+        if (implicit_auto_readahead_) {
+          if (!IsEligibleForPrefetch(offset, n)) {
+            // Ignore status as Prefetch is not called.
+            s.PermitUncheckedError();
+            return false;
+          }
        }
 
-        if (!s.ok()) {
-          if (status) {
-            *status = s;
-          }
+        // Prefetch n + readahead_size_/2 synchronously as remaining
+        // readahead_size_/2 will be prefetched asynchronously if num_buffers_
+        // > 1.
+        s = PrefetchInternal(
+            opts, reader, offset, n,
+            (num_buffers_ > 1 ? readahead_size_ / 2 : readahead_size_),
+            copy_to_overlap_buffer);
+        explicit_prefetch_submitted_ = false;
+      }
+
+      if (!s.ok()) {
+        if (status) {
+          *status = s;
+        }
 #ifndef NDEBUG
         IGNORE_STATUS_IF_ERROR(s);
 #endif
@@ -775,7 +777,7 @@ Status FilePrefetchBuffer::PrefetchInternal(const IOOptions& opts,
     readahead_size_ = std::min(max_readahead_size_, readahead_size_ * 2);
   }
   return true;
-  }
+}
 
 void FilePrefetchBuffer::PrefetchAsyncCallback(const FSReadRequest& req,
                                                void* cb_arg) {
diff --git a/table/block_based/block_based_table_reader.h b/table/block_based/block_based_table_reader.h
index b3ecb9ebdf72..262b53a7aef1 100644
--- a/table/block_based/block_based_table_reader.h
+++ b/table/block_based/block_based_table_reader.h
@@ -707,12 +707,10 @@ struct BlockBasedTable::Rep {
   }
 
   void CreateFilePrefetchBufferIfNotExists(
-      const ReadaheadParams& readahead_params,
-      std::unique_ptr<FilePrefetchBuffer>* fpb,
-      const std::function<void(bool, uint64_t&, uint64_t&)>&
-          readaheadsize_cb,
-      FilePrefetchBufferUsage usage = FilePrefetchBufferUsage::kUnknown)
-      const {
+      const ReadaheadParams& readahead_params,
+      std::unique_ptr<FilePrefetchBuffer>* fpb,
+      const std::function<void(bool, uint64_t&, uint64_t&)>& readaheadsize_cb,
+      FilePrefetchBufferUsage usage = FilePrefetchBufferUsage::kUnknown) const {
     if (!(*fpb)) {
       CreateFilePrefetchBuffer(readahead_params, fpb, readaheadsize_cb, usage);
    }
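
Note (not part of the patch above): for readers unfamiliar with this API, the minimal caller-side sketch below illustrates the contract of TryReadFromCache() as reformatted in this change. Only the TryReadFromCache() signature is taken from the diff; prefetch_buffer, reader, io_opts, and the ReadFromFileDirectly() helper are hypothetical stand-ins, not code from RocksDB.

// Illustrative sketch only -- assumes an existing FilePrefetchBuffer*
// prefetch_buffer and RandomAccessFileReader* reader (hypothetical here).
Slice result;
Status s;
const bool for_compaction = false;
if (!prefetch_buffer->TryReadFromCache(io_opts, reader, offset, n, &result, &s,
                                       for_compaction)) {
  // Miss (or prefetching disabled): fall back to reading the range directly.
  s = ReadFromFileDirectly(reader, io_opts, offset, n, &result);  // hypothetical helper
}
if (!s.ok()) {
  // Handle the I/O error surfaced either by the buffer or by the direct read.
}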