- Arena
- Hash
- Slice
- Bloom
- MutexLock
- Cache
- Coding
- Comparator
- CRC32
- Env
- Histogram
- Logging
- NoDestructor
- Options
- Random
- Status
- Iterator
Some source code under the util/ folder. The components are fairly independent of each other, which makes them a good place to start reading the codebase.
This post adds comments along some key code paths and gives a few summary points for each structure.
Arena
A user-space memory allocator.
- The block size is 4096 B
- A single request larger than 1/4 * BlockSize = 1024 B gets its own dedicated block of the requested size
- Small requests are carved out of (and reuse) the current block
- Aligned allocation is supported
Header
class Arena {
public:
Arena();
// The copy constructor and copy assignment are deleted, so an Arena can only be passed by reference
Arena(const Arena&) = delete;
Arena& operator=(const Arena&) = delete;
~Arena();
// Return a pointer to a newly allocated memory block of "bytes" bytes.
char* Allocate(size_t bytes);
// Aligned allocation
// Allocate memory with the normal alignment guarantees provided by malloc.
char* AllocateAligned(size_t bytes);
// Returns an estimate of the total memory usage of data allocated
// by the arena.
size_t MemoryUsage() const {
return memory_usage_.load(std::memory_order_relaxed);
}
private:
char* AllocateFallback(size_t bytes);
char* AllocateNewBlock(size_t block_bytes);
// Allocation state
// alloc_ptr_ points at the next free byte of the current block;
// alloc_bytes_remaining_ is how much of that block is still free
char* alloc_ptr_;
size_t alloc_bytes_remaining_;
// Array of new[] allocated memory blocks
std::vector<char*> blocks_;
// The Arena itself is not thread-safe; the atomic only lets Arena::MemoryUsage() be read without taking a lock
// Total memory usage of the arena.
//
// TODO(costan): This member is accessed via atomics, but the others are
// accessed without any locking. Is this OK?
std::atomic<size_t> memory_usage_;
};
inline char* Arena::Allocate(size_t bytes) {
// The semantics of what to return are a bit messy if we allow
// 0-byte allocations, so we disallow them here (we don't need
// them for our internal use).
assert(bytes > 0);
if (bytes <= alloc_bytes_remaining_) {
char* result = alloc_ptr_;
alloc_ptr_ += bytes;
alloc_bytes_remaining_ -= bytes;
return result;
}
// Need a new block
return AllocateFallback(bytes);
}
Implementation
static const int kBlockSize = 4096;
Arena::Arena()
: alloc_ptr_(nullptr), alloc_bytes_remaining_(0), memory_usage_(0) {}
Arena::~Arena() {
for (size_t i = 0; i < blocks_.size(); i++) {
delete[] blocks_[i];
}
}
// Allocate a new block to satisfy this request
char* Arena::AllocateFallback(size_t bytes) {
if (bytes > kBlockSize / 4) {
// Object is more than a quarter of our block size. Allocate it separately
// to avoid wasting too much space in leftover bytes.
// The request is larger than 1/4 of a block: carving it out of a fresh standard block would leave less than 3/4 of that block for later requests, so give it a dedicated block of exactly the requested size
char* result = AllocateNewBlock(bytes);
return result;
}
// Otherwise start a full-size block, so plenty is left over for subsequent small requests
// We waste the remaining space in the current block.
alloc_ptr_ = AllocateNewBlock(kBlockSize);
alloc_bytes_remaining_ = kBlockSize;
char* result = alloc_ptr_;
alloc_ptr_ += bytes;
// At least 3/4 of the block remains
alloc_bytes_remaining_ -= bytes;
return result;
}
// Aligned allocation
char* Arena::AllocateAligned(size_t bytes) {
// Align to max(sizeof(void*), 8); on typical 64-bit platforms that is 8
const int align = (sizeof(void*) > 8) ? sizeof(void*) : 8;
static_assert((align & (align - 1)) == 0,
"Pointer size should be a power of 2");
// Low bits of alloc_ptr_ (the low 3 bits when align == 8), i.e. how far it is from an aligned address
size_t current_mod = reinterpret_cast<uintptr_t>(alloc_ptr_) & (align - 1);
// If current_mod is 0 the pointer is already aligned and no padding (slop) is needed; otherwise slop is the number of padding bytes required to reach the next align boundary
size_t slop = (current_mod == 0 ? 0 : align - current_mod);
// Total bytes consumed: the request plus the padding
size_t needed = bytes + slop;
char* result;
if (needed <= alloc_bytes_remaining_) {
// Skip over the padding and return the aligned pointer
result = alloc_ptr_ + slop;
alloc_ptr_ += needed;
alloc_bytes_remaining_ -= needed;
} else {
// Memory returned by new[] for a fresh block is always suitably aligned
// AllocateFallback always returned aligned memory
result = AllocateFallback(bytes);
}
// Check that the result really is aligned
assert((reinterpret_cast<uintptr_t>(result) & (align - 1)) == 0);
return result;
}
char* Arena::AllocateNewBlock(size_t block_bytes) {
char* result = new char[block_bytes];
blocks_.push_back(result);
// Also counts the sizeof(char*) of the pointer stored in blocks_
memory_usage_.fetch_add(block_bytes + sizeof(char*),
std::memory_order_relaxed);
return result;
}
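A minimal usage sketch (illustrative only, not code from the repository; it assumes the internal header util/arena.h is on the include path):
#include "util/arena.h"

#include <cassert>
#include <cstdint>
#include <cstring>

void ArenaDemo() {
  leveldb::Arena arena;
  // Small request: carved out of the current 4096-byte block.
  char* a = arena.Allocate(16);
  std::memcpy(a, "hello", 5);
  // Large request (> 1024 bytes): gets its own dedicated block.
  char* b = arena.Allocate(2000);
  (void)b;
  // Aligned request: the result is at least 8-byte aligned.
  char* c = arena.AllocateAligned(24);
  assert(reinterpret_cast<std::uintptr_t>(c) % 8 == 0);
  // MemoryUsage() counts the blocks plus the pointers tracking them.
  assert(arena.MemoryUsage() >= 4096 + 2000);
}  // All blocks are freed when arena goes out of scope.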
Hash
A fast hash function
- Non-cryptographic
- Fast
- Similar in spirit to MurmurHash
// The FALLTHROUGH_INTENDED macro can be used to annotate implicit fall-through
// between switch labels. The real definition should be provided externally.
// This one is a fallback version for unsupported compilers.
#ifndef FALLTHROUGH_INTENDED
#define FALLTHROUGH_INTENDED \
do { \
} while (0)
#endif
uint32_t Hash(const char* data, size_t n, uint32_t seed) {
// Similar to murmur hash
const uint32_t m = 0xc6a4a793;
const uint32_t r = 24;
const char* limit = data + n;
uint32_t h = seed ^ (n * m);
// Pick up four bytes at a time
while (data + 4 <= limit) {
uint32_t w = DecodeFixed32(data);
data += 4;
h += w;
h *= m;
h ^= (h >> 16);
}
// Pick up remaining bytes
switch (limit - data) {
case 3:
h += static_cast<uint8_t>(data[2]) << 16;
FALLTHROUGH_INTENDED;
case 2:
h += static_cast<uint8_t>(data[1]) << 8;
FALLTHROUGH_INTENDED;
case 1:
h += static_cast<uint8_t>(data[0]);
h *= m;
h ^= (h >> r);
break;
}
return h;
}
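Callers just pass a byte range and a seed; a quick sketch (assuming the internal header util/hash.h):
#include "util/hash.h"

#include <cstdint>
#include <cstring>

uint32_t HashDemo() {
  const char* key = "level";
  // The same (data, len, seed) always yields the same value; different seeds
  // act like independent hash functions over the same bytes.
  return leveldb::Hash(key, std::strlen(key), 0xbc9f1d34);
}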
Slice
A slice: a non-owning pointer + length view over some external bytes.
Conceptually the structure is just:
struct Slice {
const char *data;
size_t size_;
};
class LEVELDB_EXPORT Slice {
public:
// Create an empty slice.
Slice() : data_(""), size_(0) {}
// Create a slice that refers to d[0,n-1].
Slice(const char* d, size_t n) : data_(d), size_(n) {}
// Create a slice that refers to the contents of "s"
Slice(const std::string& s) : data_(s.data()), size_(s.size()) {}
// Create a slice that refers to s[0,strlen(s)-1]
Slice(const char* s) : data_(s), size_(strlen(s)) {}
// Intentionally copyable.
Slice(const Slice&) = default;
Slice& operator=(const Slice&) = default;
// Return a pointer to the beginning of the referenced data
const char* data() const { return data_; }
// Return the length (in bytes) of the referenced data
size_t size() const { return size_; }
// Return true iff the length of the referenced data is zero
bool empty() const { return size_ == 0; }
// Return the ith byte in the referenced data.
// REQUIRES: n < size()
char operator[](size_t n) const {
assert(n < size());
return data_[n];
}
// Change this slice to refer to an empty array
void clear() {
data_ = "";
size_ = 0;
}
// Drop the first "n" bytes from this slice.
void remove_prefix(size_t n) {
assert(n <= size());
data_ += n;
size_ -= n;
}
// Return a string that contains the copy of the referenced data.
std::string ToString() const { return std::string(data_, size_); }
// Three-way comparison. Returns value:
// < 0 iff "*this" < "b",
// == 0 iff "*this" == "b",
// > 0 iff "*this" > "b"
int compare(const Slice& b) const;
// Return true iff "x" is a prefix of "*this"
bool starts_with(const Slice& x) const {
return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0));
}
private:
const char* data_;
size_t size_;
};
inline bool operator==(const Slice& x, const Slice& y) {
return ((x.size() == y.size()) &&
(memcmp(x.data(), y.data(), x.size()) == 0));
}
inline bool operator!=(const Slice& x, const Slice& y) { return !(x == y); }
inline int Slice::compare(const Slice& b) const {
const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
int r = memcmp(data_, b.data_, min_len);
if (r == 0) {
if (size_ < b.size_)
r = -1;
else if (size_ > b.size_)
r = +1;
}
return r;
}
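A small usage sketch of the public header leveldb/slice.h (illustrative):
#include "leveldb/slice.h"

#include <cassert>
#include <string>

void SliceDemo() {
  std::string backing = "user_0042";
  leveldb::Slice s(backing);       // no copy: s points into backing
  assert(s.starts_with("user_"));
  s.remove_prefix(5);              // s now refers to "0042"
  assert(s.ToString() == "0042");  // ToString() copies the bytes out
  // s is only valid while backing stays alive and unmodified.
}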
Bloom
Bloom filter
- Uses the Hash above as the first-level hash
- A delta is then derived from that hash value, and each subsequent probe position is obtained by repeatedly adding the delta
- In other words there is really only one hash function (double hashing simulates the rest)
- Keys cannot be deleted; the whole filter has to be rebuilt (a Cuckoo Filter, by contrast, supports deletion)
- FilterPolicy is a pure abstract class that defines the filtering interface
class LEVELDB_EXPORT FilterPolicy {
public:
virtual ~FilterPolicy();
// Return the name of this policy. Note that if the filter encoding
// changes in an incompatible way, the name returned by this method
// must be changed. Otherwise, old incompatible filters may be
// passed to methods of this type.
virtual const char* Name() const = 0;
// Build a filter from the n keys in keys[0,n-1] and append it to *dst
// keys[0,n-1] contains a list of keys (potentially with duplicates)
// that are ordered according to the user supplied comparator.
// Append a filter that summarizes keys[0,n-1] to *dst.
//
// Warning: do not change the initial contents of *dst. Instead,
// append the newly constructed filter to *dst.
virtual void CreateFilter(const Slice* keys, int n,
std::string* dst) const = 0;
// "filter" is a slice cut out of the dst built above; check whether key may be present
// "filter" contains the data appended by a preceding call to
// CreateFilter() on this class. This method must return true if
// the key was in the list of keys passed to CreateFilter().
// This method may return true or false if the key was not on the
// list, but it should aim to return false with a high probability.
virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0;
};
// Return a new filter policy that uses a bloom filter with approximately
// the specified number of bits per key. A good value for bits_per_key
// is 10, which yields a filter with ~ 1% false positive rate.
//
// Callers must delete the result after any database that is using the
// result has been closed.
//
// Note: if you are using a custom comparator that ignores some parts
// of the keys being compared, you must not use NewBloomFilterPolicy()
// and must provide your own FilterPolicy that also ignores the
// corresponding parts of the keys. For example, if the comparator
// ignores trailing spaces, it would be incorrect to use a
// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
// trailing spaces in keys.
LEVELDB_EXPORT const FilterPolicy* NewBloomFilterPolicy(int bits_per_key);
} // namespace leveldb
- Bloom filter implementation
static uint32_t BloomHash(const Slice& key) {
return Hash(key.data(), key.size(), 0xbc9f1d34);
}
class BloomFilterPolicy : public FilterPolicy {
public:
explicit BloomFilterPolicy(int bits_per_key) : bits_per_key_(bits_per_key) {
// We intentionally round down to reduce probing cost a little bit
k_ = static_cast<size_t>(bits_per_key * 0.69); // 0.69 =~ ln(2)
if (k_ < 1) k_ = 1;
if (k_ > 30) k_ = 30;
}
const char* Name() const override { return "leveldb.BuiltinBloomFilter2"; }
void CreateFilter(const Slice* keys, int n, std::string* dst) const override {
// Compute bloom filter size (in both bits and bytes)
size_t bits = n * bits_per_key_;
// For small n, we can see a very high false positive rate. Fix it
// by enforcing a minimum bloom filter length.
// At least 64 bits, otherwise the false positive rate gets very high
if (bits < 64) bits = 64;
// Round up to whole bytes; bits becomes a multiple of 8
size_t bytes = (bits + 7) / 8;
bits = bytes * 8;
const size_t init_size = dst->size();
dst->resize(init_size + bytes, 0);
// Append k at the end so a reader knows how many probe rounds were used per key
dst->push_back(static_cast<char>(k_)); // Remember # of probes in filter
// The bytes in between are the filter's bit array
char* array = &(*dst)[init_size];
// Process each key
for (int i = 0; i < n; i++) {
// Use double-hashing to generate a sequence of hash values.
// See analysis in [Kirsch,Mitzenmacher 2006].
// First hash: mixes all bytes of the key and gives a fairly uniform value, but is comparatively slow
uint32_t h = BloomHash(keys[i]);
// Rotate the hash right by 17 bits to get delta (double hashing); every subsequent round adds this delta, so the k_ probes for one key are evenly spaced
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k_; j++) {
// The hash value (mod bits) is the bit position to set
const uint32_t bitpos = h % bits;
array[bitpos / 8] |= (1 << (bitpos % 8));
// The "second hash" is just an addition, about as cheap as a hash gets
h += delta;
}
}
}
bool KeyMayMatch(const Slice& key, const Slice& bloom_filter) const override {
const size_t len = bloom_filter.size();
if (len < 2) return false;
const char* array = bloom_filter.data();
const size_t bits = (len - 1) * 8;
// Use the encoded k so that we can read filters generated by
// bloom filters created using different parameters.
// Read k back from the last byte
const size_t k = array[len - 1];
if (k > 30) {
// Reserved for potentially new encodings for short bloom filters.
// Consider it a match.
return true;
}
// Check whether all k probed bits are set
uint32_t h = BloomHash(key);
const uint32_t delta = (h >> 17) | (h << 15); // Rotate right 17 bits
for (size_t j = 0; j < k; j++) {
const uint32_t bitpos = h % bits;
if ((array[bitpos / 8] & (1 << (bitpos % 8))) == 0) return false;
h += delta;
}
return true;
}
private:
size_t bits_per_key_;
size_t k_;
};
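A usage sketch of the public API (NewBloomFilterPolicy comes from leveldb/filter_policy.h; the keys and the 10 bits per key are arbitrary):
#include "leveldb/filter_policy.h"
#include "leveldb/slice.h"

#include <string>
#include <vector>

void BloomDemo() {
  const leveldb::FilterPolicy* policy = leveldb::NewBloomFilterPolicy(10);
  std::vector<leveldb::Slice> keys = {"apple", "banana", "cherry"};
  std::string filter;
  policy->CreateFilter(keys.data(), static_cast<int>(keys.size()), &filter);
  // Keys that were inserted always match; absent keys match only with ~1%
  // probability at 10 bits per key.
  bool hit = policy->KeyMayMatch("banana", filter);    // true
  bool maybe = policy->KeyMayMatch("durian", filter);  // almost always false
  (void)hit;
  (void)maybe;
  delete policy;
}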
MutexLock
An RAII wrapper around a mutex, so that an Unlock can never be forgotten
class SCOPED_LOCKABLE MutexLock {
public:
explicit MutexLock(port::Mutex* mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) {
this->mu_->Lock();
}
~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); }
// Not copyable (cannot be passed by value)
MutexLock(const MutexLock&) = delete;
MutexLock& operator=(const MutexLock&) = delete;
private:
port::Mutex* const mu_;
};
The macros below are Clang thread-safety annotations; they let Clang statically detect threading problems such as:
- data races
- lock leaks
- deadlocks
- ... see https://clang.llvm.org/docs/ThreadSafetyAnalysis.html for details
- the SCOPED_LOCKABLE macro
#define SCOPED_LOCKABLE THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
- the EXCLUSIVE_LOCK_FUNCTION macro
#define EXCLUSIVE_LOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(exclusive_lock_function(__VA_ARGS__))
- the UNLOCK_FUNCTION macro
#define UNLOCK_FUNCTION(...) \
THREAD_ANNOTATION_ATTRIBUTE__(unlock_function(__VA_ARGS__))
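A brief sketch of the pattern the wrapper enables (assumes the internal headers port/port.h and util/mutexlock.h; the Counter class is made up):
#include "port/port.h"
#include "util/mutexlock.h"

class Counter {
 public:
  void Increment() {
    leveldb::MutexLock l(&mu_);  // Lock() in the constructor...
    ++value_;
  }  // ...Unlock() in the destructor, on every exit path.

 private:
  leveldb::port::Mutex mu_;
  int value_ = 0;
};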
Cache
The unified Cache abstraction
- The public cache API; implementations are required to be thread-safe
- Accessing a cache entry goes through a cache handle, which must be released once the caller is done with it
Definition
class LEVELDB_EXPORT Cache {
public:
Cache() = default;
Cache(const Cache&) = delete;
Cache& operator=(const Cache&) = delete;
// Destroys all existing entries by calling the "deleter"
// function that was passed to the constructor.
virtual ~Cache();
// Opaque handle to an entry stored in the cache.
struct Handle {};
// Insert a mapping from key->value into the cache and assign it
// the specified charge against the total cache capacity.
//
// Returns a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no
// longer needed.
//
// When the inserted entry is no longer needed, the key and
// value will be passed to "deleter".
virtual Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) = 0;
// If the cache has no mapping for "key", returns nullptr.
//
// Else return a handle that corresponds to the mapping. The caller
// must call this->Release(handle) when the returned mapping is no
// longer needed.
virtual Handle* Lookup(const Slice& key) = 0;
// Release a mapping returned by a previous Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void Release(Handle* handle) = 0;
// Return the value encapsulated in a handle returned by a
// successful Lookup().
// REQUIRES: handle must not have been released yet.
// REQUIRES: handle must have been returned by a method on *this.
virtual void* Value(Handle* handle) = 0;
// If the cache contains entry for key, erase it. Note that the
// underlying entry will be kept around until all existing handles
// to it have been released.
virtual void Erase(const Slice& key) = 0;
// Return a new numeric id. May be used by multiple clients who are
// sharing the same cache to partition the key space. Typically the
// client will allocate a new id at startup and prepend the id to
// its cache keys.
virtual uint64_t NewId() = 0;
// Remove all cache entries that are not actively in use. Memory-constrained
// applications may wish to call this method to reduce memory usage.
// Default implementation of Prune() does nothing. Subclasses are strongly
// encouraged to override the default implementation. A future release of
// leveldb may change Prune() to a pure abstract method.
virtual void Prune() {}
// Return an estimate of the combined charges of all elements stored in the
// cache.
virtual size_t TotalCharge() const = 0;
};
LRU Cache
- Each cache entry has an in_cache field recording whether the cache still holds a reference to it
- in_cache only becomes false when the entry is passed to Erase(), when Insert() replaces it with an entry of the same key, or when the whole cache is destroyed
- The LRU cache keeps two linked lists; an entry is on at most one of them
- in-use: entries currently referenced by clients, in no particular order (this list exists for invariant checking; if the check were removed, elements that would be on this list could just as well be left as disconnected singleton lists)
- LRU: the regular list, holding entries not currently referenced by clients, ordered by recency of use
- The reference counting done by Ref() and Unref() decides which list an entry sits on, and where
// LRU cache implementation
//
// Cache entries have an "in_cache" boolean indicating whether the cache has a
// reference on the entry. The only ways that this can become false without the
// entry being passed to its "deleter" are via Erase(), via Insert() when
// an element with a duplicate key is inserted, or on destruction of the cache.
//
// The cache keeps two linked lists of items in the cache. All items in the
// cache are in one list or the other, and never both. Items still referenced
// by clients but erased from the cache are in neither list. The lists are:
// - in-use: contains the items currently referenced by clients, in no
// particular order. (This list is used for invariant checking. If we
// removed the check, elements that would otherwise be on this list could be
// left as disconnected singleton lists.)
// - LRU: contains the items not currently referenced by clients, in LRU order
// Elements are moved between these lists by the Ref() and Unref() methods,
// when they detect an element in the cache acquiring or losing its only
// external reference.
- LRUHandle
- An LRU entry: an intrusive, heap-allocated structure
- Variable-length (the key, whose length varies, is stored inline at the end)
- Each handle carries prev and next pointers used to build the doubly linked lists
- Plus a next_hash pointer, used for chaining in the hash table
// An entry is a variable length heap-allocated structure. Entries
// are kept in a circular doubly linked list ordered by access time.
struct LRUHandle {
void* value;
void (*deleter)(const Slice&, void* value);
LRUHandle* next_hash;
LRUHandle* next;
LRUHandle* prev;
size_t charge; // TODO(opt): Only allow uint32_t?
size_t key_length;
bool in_cache; // Whether entry is in the cache.
uint32_t refs; // References, including cache reference, if present.
uint32_t hash; // Hash of key(); used for fast sharding and comparisons
char key_data[1]; // Beginning of key. Declared with length 1, but the rest of the key follows right behind it, so the struct cannot be allocated with plain new; it is malloc'ed with sizeof(LRUHandle) - 1 + key_length. Keeping the key inline instead of behind a pointer is cache friendly
Slice key() const {
// next is only equal to this if the LRU handle is the list head of an
// empty list. List heads never have meaningful keys.
assert(next != this);
return Slice(key_data, key_length);
}
};
- HashTable
A hash table whose hash values are supplied by the caller; performance is quite good
- It is fed the non-cryptographic Hash above (could this be a hash-collision-attack concern?)
- Collisions are resolved by chaining
- The load factor is 1 (it only grows once the element count exceeds the bucket count)
// We provide our own simple hash table since it removes a whole bunch
// of porting hacks and is also faster than some of the built-in hash
// table implementations in some of the compiler/runtime combinations
// we have tested. E.g., readrandom speeds up by ~5% over the g++
// 4.4.3's builtin hashtable.
class HandleTable {
public:
HandleTable() : length_(0), elems_(0), list_(nullptr) { Resize(); }
~HandleTable() { delete[] list_; }
LRUHandle* Lookup(const Slice& key, uint32_t hash) {
return *FindPointer(key, hash);
}
LRUHandle* Insert(LRUHandle* h) {
LRUHandle** ptr = FindPointer(h->key(), h->hash);
LRUHandle* old = *ptr;
// Inherit the old entry's next_hash, if there was one
h->next_hash = (old == nullptr ? nullptr : old->next_hash);
*ptr = h;
if (old == nullptr) {
// No existing entry with this key: a new element was added, check whether to grow
++elems_;
if (elems_ > length_) { // load factor of 1 (elems_ > 1 * length_)
// Since each cache entry is fairly large, we aim for a small
// average linked list length (<= 1).
Resize();
}
}
// Return the old entry (nullptr if there was none)
return old;
}
LRUHandle* Remove(const Slice& key, uint32_t hash) {
LRUHandle** ptr = FindPointer(key, hash);
LRUHandle* result = *ptr;
if (result != nullptr) {
// Splice the entry out by pointing the slot at the next element in the chain
*ptr = result->next_hash;
--elems_;
}
return result;
}
private:
// The table consists of an array of buckets where each bucket is
// a linked list of cache entries that hash into the bucket.
uint32_t length_;
uint32_t elems_;
LRUHandle** list_; // Bucket array; collisions are resolved by chaining
// Return a pointer to slot that points to a cache entry that
// matches key/hash. If there is no such cache entry, return a
// pointer to the trailing slot in the corresponding linked list.
LRUHandle** FindPointer(const Slice& key, uint32_t hash) {
LRUHandle** ptr = &list_[hash & (length_ - 1)];
while (*ptr != nullptr && ((*ptr)->hash != hash || key != (*ptr)->key())) {
// Collision: walk the chain looking for a matching hash and key
ptr = &(*ptr)->next_hash;
}
return ptr;
}
void Resize() {
uint32_t new_length = 4;
while (new_length < elems_) {
// Grow geometrically
new_length *= 2;
}
LRUHandle** new_list = new LRUHandle*[new_length];
// Zero the new buckets first so nothing ever reads uninitialized memory
memset(new_list, 0, sizeof(new_list[0]) * new_length);
uint32_t count = 0;
for (uint32_t i = 0; i < length_; i++) {
LRUHandle* h = list_[i];
while (h != nullptr) {
LRUHandle* next = h->next_hash;
uint32_t hash = h->hash;
// Use the stored hash to find the slot in the new table
LRUHandle** ptr = &new_list[hash & (new_length - 1)];
// Link the entry in front of that slot's existing chain
h->next_hash = *ptr;
// The slot now points at the new head
*ptr = h;
h = next;
count++;
}
}
assert(elems_ == count);
delete[] list_;
list_ = new_list;
length_ = new_length;
}
};
- LRUCache
- A single shard of the LRU cache; sharding allows finer-grained locking
// A single shard of sharded cache.
class LRUCache {
public:
LRUCache();
~LRUCache();
// Separate from constructor so caller can easily make an array of LRUCache
void SetCapacity(size_t capacity) { capacity_ = capacity; }
// Like Cache methods, but with an extra "hash" parameter.
Cache::Handle* Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key, void* value));
Cache::Handle* Lookup(const Slice& key, uint32_t hash);
void Release(Cache::Handle* handle);
void Erase(const Slice& key, uint32_t hash);
void Prune();
size_t TotalCharge() const {
MutexLock l(&mutex_);
return usage_;
}
private:
void LRU_Remove(LRUHandle* e);
void LRU_Append(LRUHandle* list, LRUHandle* e);
void Ref(LRUHandle* e);
void Unref(LRUHandle* e);
bool FinishErase(LRUHandle* e) EXCLUSIVE_LOCKS_REQUIRED(mutex_);
// Initialized before use.
size_t capacity_;
// mutex_ protects the following state.
mutable port::Mutex mutex_;
size_t usage_ GUARDED_BY(mutex_);
// Dummy head of LRU list.
// lru.prev is newest entry, lru.next is oldest entry.
// Entries have refs==1 and in_cache==true.
LRUHandle lru_ GUARDED_BY(mutex_);
// Dummy head of in-use list.
// Entries are in use by clients, and have refs >= 2 and in_cache==true.
LRUHandle in_use_ GUARDED_BY(mutex_);
HandleTable table_ GUARDED_BY(mutex_);
};
LRUCache::LRUCache() : capacity_(0), usage_(0) {
// Make empty circular linked lists.
// Initialize the dummy heads of both circular lists
lru_.next = &lru_;
lru_.prev = &lru_;
in_use_.next = &in_use_;
in_use_.prev = &in_use_;
}
LRUCache::~LRUCache() {
// On destruction the in_use_ list must already be empty
assert(in_use_.next == &in_use_); // Error if caller has an unreleased handle
for (LRUHandle* e = lru_.next; e != &lru_;) {
LRUHandle* next = e->next;
assert(e->in_cache);
e->in_cache = false;
// Each entry still holds exactly one reference: the cache's own
assert(e->refs == 1); // Invariant of lru_ list.
Unref(e);
e = next;
}
}
void LRUCache::Ref(LRUHandle* e) {
if (e->refs == 1 && e->in_cache) { // If on lru_ list, move to in_use_ list.
// Move it onto the in_use_ list
LRU_Remove(e);
LRU_Append(&in_use_, e);
}
e->refs++;
}
void LRUCache::Unref(LRUHandle* e) {
assert(e->refs > 0);
e->refs--;
if (e->refs == 0) { // Deallocate.
assert(!e->in_cache);
(*e->deleter)(e->key(), e->value);
free(e);
} else if (e->in_cache && e->refs == 1) {
// No longer in use; move to lru_ list.
// Move it back to lru_, appended at the newest end
// Entries on lru_ always have exactly one remaining reference (the cache's)
LRU_Remove(e);
LRU_Append(&lru_, e);
}
}
void LRUCache::LRU_Remove(LRUHandle* e) {
e->next->prev = e->prev;
e->prev->next = e->next;
}
void LRUCache::LRU_Append(LRUHandle* list, LRUHandle* e) {
// Make "e" newest entry by inserting just before *list
e->next = list;
e->prev = list->prev;
e->prev->next = e;
e->next->prev = e;
}
Cache::Handle* LRUCache::Lookup(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_); // Take this shard's lock first
LRUHandle* e = table_.Lookup(key, hash);
if (e != nullptr) {
Ref(e);
}
// Hide the implementation details by casting to the opaque Cache::Handle
return reinterpret_cast<Cache::Handle*>(e);
}
void LRUCache::Release(Cache::Handle* handle) {
MutexLock l(&mutex_);
Unref(reinterpret_cast<LRUHandle*>(handle));
}
Cache::Handle* LRUCache::Insert(const Slice& key, uint32_t hash, void* value,
size_t charge,
void (*deleter)(const Slice& key,
void* value)) {
MutexLock l(&mutex_);
// Must malloc by hand: LRUHandle is variable-length, sizeof only gives the base size, and the key bytes have to be added on top
LRUHandle* e =
reinterpret_cast<LRUHandle*>(malloc(sizeof(LRUHandle) - 1 + key.size()));
e->value = value;
e->deleter = deleter;
e->charge = charge;
e->key_length = key.size();
e->hash = hash;
e->in_cache = false;
e->refs = 1; // for the returned handle.
// Copy the key into the tail of the struct
std::memcpy(e->key_data, key.data(), key.size());
if (capacity_ > 0) {
e->refs++; // for the cache's reference.
e->in_cache = true;
LRU_Append(&in_use_, e);
usage_ += charge; // usage grows by charge, not by 1
FinishErase(table_.Insert(e));
} else { // don't cache. (capacity_==0 is supported and turns off caching.)
// next is read by key() in an assert, so it must be initialized
// capacity_ == 0 turns caching off entirely (the abstraction still has some cost even when off)
e->next = nullptr;
}
// Evict the oldest entries from the front of lru_ while over capacity
while (usage_ > capacity_ && lru_.next != &lru_) {
LRUHandle* old = lru_.next;
// Entries on lru_ always have exactly one reference left
assert(old->refs == 1);
bool erased = FinishErase(table_.Remove(old->key(), old->hash));
if (!erased) { // to avoid unused variable when compiled NDEBUG
assert(erased);
}
}
return reinterpret_cast<Cache::Handle*>(e);
}
// If e != nullptr, finish removing *e from the cache; it has already been
// removed from the hash table. Return whether e != nullptr.
bool LRUCache::FinishErase(LRUHandle* e) {
if (e != nullptr) {
assert(e->in_cache);
LRU_Remove(e);
// Mark it as no longer being in the cache
e->in_cache = false;
usage_ -= e->charge;
// Drop the cache's reference; the entry is freed once the last reference is gone
Unref(e);
}
return e != nullptr;
}
void LRUCache::Erase(const Slice& key, uint32_t hash) {
MutexLock l(&mutex_);
FinishErase(table_.Remove(key, hash));
}
void LRUCache::Prune() {
MutexLock l(&mutex_);
while (lru_.next != &lru_) {
LRUHandle* e = lru_.next;
assert(e->refs == 1);
bool erased = FinishErase(table_.Remove(e->key(), e->hash));
if (!erased) { // to avoid unused variable when compiled NDEBUG
assert(erased);
}
}
}
- ShardedLRUCache
- The sharded wrapper over the shards above; implements Cache
- This is the default Cache implementation
static const int kNumShardBits = 4;
static const int kNumShards = 1 << kNumShardBits;
class ShardedLRUCache : public Cache {
private:
LRUCache shard_[kNumShards]; // 16 shards in total
port::Mutex id_mutex_; // Protects last_id_
uint64_t last_id_;
static inline uint32_t HashSlice(const Slice& s) {
// The concrete hash function, with the seed fixed at 0 (are the keys generated internally? is hash flooding a concern?)
return Hash(s.data(), s.size(), 0);
}
// The top 4 bits of the hash pick the shard
static uint32_t Shard(uint32_t hash) { return hash >> (32 - kNumShardBits); }
public:
// Ids start from 0
explicit ShardedLRUCache(size_t capacity) : last_id_(0) {
const size_t per_shard = (capacity + (kNumShards - 1)) / kNumShards;
for (int s = 0; s < kNumShards; s++) {
shard_[s].SetCapacity(per_shard);
}
}
~ShardedLRUCache() override {}
Handle* Insert(const Slice& key, void* value, size_t charge,
void (*deleter)(const Slice& key, void* value)) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Insert(key, hash, value, charge, deleter);
}
Handle* Lookup(const Slice& key) override {
const uint32_t hash = HashSlice(key);
return shard_[Shard(hash)].Lookup(key, hash);
}
void Release(Handle* handle) override {
LRUHandle* h = reinterpret_cast<LRUHandle*>(handle);
shard_[Shard(h->hash)].Release(handle);
}
void Erase(const Slice& key) override {
const uint32_t hash = HashSlice(key);
shard_[Shard(hash)].Erase(key, hash);
}
void* Value(Handle* handle) override {
return reinterpret_cast<LRUHandle*>(handle)->value;
}
uint64_t NewId() override {
MutexLock l(&id_mutex_);
return ++(last_id_);
}
void Prune() override {
for (int s = 0; s < kNumShards; s++) {
shard_[s].Prune();
}
}
size_t TotalCharge() const override {
size_t total = 0;
for (int s = 0; s < kNumShards; s++) {
total += shard_[s].TotalCharge();
}
return total;
}
};
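A usage sketch of the public Cache API (NewLRUCache is declared in leveldb/cache.h and returns the sharded LRU cache above; the key, value type and charge are arbitrary):
#include "leveldb/cache.h"

#include <string>

static void DeleteValue(const leveldb::Slice& /*key*/, void* value) {
  delete static_cast<std::string*>(value);
}

void CacheDemo() {
  leveldb::Cache* cache = leveldb::NewLRUCache(8 << 20);  // 8 MB capacity
  // charge is an arbitrary cost counted against the capacity.
  leveldb::Cache::Handle* h = cache->Insert(
      "key1", new std::string("value1"), /*charge=*/1, &DeleteValue);
  cache->Release(h);  // Insert() returns a reference that must be released.

  if (leveldb::Cache::Handle* found = cache->Lookup("key1")) {
    std::string* v = static_cast<std::string*>(cache->Value(found));
    (void)v;                // valid until Release()
    cache->Release(found);
  }
  delete cache;             // the deleter runs for any remaining entries
}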
Coding
Fixed-width and varint encoding
Fixed-width encoding is straightforward; the core of the varint decoding logic is:
const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value);
inline const char* GetVarint32Ptr(const char* p, const char* limit,
uint32_t* value) {
if (p < limit) {
// Although the result is a uint32, only a single byte is inspected here
uint32_t result = *(reinterpret_cast<const uint8_t*>(p));
if ((result & 128) == 0) { // Top bit is 0: this byte is the whole varint
*value = result;
return p + 1; // A non-null return means success
}
}
// Otherwise fall back to the multi-byte decoder to see how far the varint extends
return GetVarint32PtrFallback(p, limit, value);
}
// The fallback implementation
const char* GetVarint32PtrFallback(const char* p, const char* limit,
uint32_t* value) {
uint32_t result = 0;
// At most 5 bytes (shift = 0, 7, ..., 28); the top bit of each byte says whether another byte follows
for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
uint32_t byte = *(reinterpret_cast<const uint8_t*>(p));
p++;
if (byte & 128) {
// More bytes are present
result |= ((byte & 127) << shift);
} else {
result |= (byte << shift);
*value = result;
return reinterpret_cast<const char*>(p);
}
}
return nullptr; // Ran past the shift limit or past limit: decode failed
}
// GetVarint64Ptr is similar; a 64-bit varint occupies up to 10 bytes
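A round-trip sketch using the encoder that pairs with the decoder above (PutVarint32 and GetVarint32Ptr both live in util/coding.h; the values are chosen to show the 1-byte, 2-byte and 5-byte cases):
#include "util/coding.h"

#include <cassert>
#include <cstdint>
#include <string>

void VarintDemo() {
  std::string buf;
  leveldb::PutVarint32(&buf, 1);         // 1 byte:  0x01
  leveldb::PutVarint32(&buf, 300);       // 2 bytes: 0xac 0x02 (low 7 bits first)
  leveldb::PutVarint32(&buf, 1u << 28);  // 5 bytes: the 32-bit worst case

  const char* p = buf.data();
  const char* limit = p + buf.size();
  uint32_t v;
  p = leveldb::GetVarint32Ptr(p, limit, &v);
  assert(p != nullptr && v == 1);
  p = leveldb::GetVarint32Ptr(p, limit, &v);
  assert(p != nullptr && v == 300);
  p = leveldb::GetVarint32Ptr(p, limit, &v);
  assert(p != nullptr && v == (1u << 28) && p == limit);
}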
Comparator
The comparator
- Header
- Must be thread-safe
- Used when building iterators
// A Comparator object provides a total order across slices that are
// used as keys in an sstable or a database. A Comparator implementation
// must be thread-safe since leveldb may invoke its methods concurrently
// from multiple threads.
class LEVELDB_EXPORT Comparator {
public:
virtual ~Comparator();
// Three-way comparison. Returns value:
// < 0 iff "a" < "b",
// == 0 iff "a" == "b",
// > 0 iff "a" > "b"
virtual int Compare(const Slice& a, const Slice& b) const = 0;
// The name of the comparator. Used to check for comparator
// mismatches (i.e., a DB created with one comparator is
// accessed using a different comparator.
//
// The client of this package should switch to a new name whenever
// the comparator implementation changes in a way that will cause
// the relative ordering of any two keys to change.
//
// Names starting with "leveldb." are reserved and should not be used
// by any clients of this package.
virtual const char* Name() const = 0;
// Advanced functions: these are used to reduce the space requirements
// for internal data structures like index blocks.
// If *start < limit, changes *start to a short string in [start,limit).
// Simple comparator implementations may return with *start unchanged,
// i.e., an implementation of this method that does nothing is correct.
virtual void FindShortestSeparator(std::string* start,
const Slice& limit) const = 0;
// Changes *key to a short string >= *key.
// Simple comparator implementations may return with *key unchanged,
// i.e., an implementation of this method that does nothing is correct.
virtual void FindShortSuccessor(std::string* key) const = 0;
};
- BytewiseComparator
- The default comparator implementation
class BytewiseComparatorImpl : public Comparator {
public:
BytewiseComparatorImpl() = default;
const char* Name() const override { return "leveldb.BytewiseComparator"; }
int Compare(const Slice& a, const Slice& b) const override {
// Byte-wise lexicographic comparison
return a.compare(b);
}
void FindShortestSeparator(std::string* start,
const Slice& limit) const override {
// Find length of common prefix
size_t min_length = std::min(start->size(), limit.size());
size_t diff_index = 0;
while ((diff_index < min_length) &&
((*start)[diff_index] == limit[diff_index])) {
diff_index++;
}
// So even Google writes oddly shaped code like this (an if branch kept only for its comment)
if (diff_index >= min_length) {
// Do not shorten if one string is a prefix of the other
} else {
uint8_t diff_byte = static_cast<uint8_t>((*start)[diff_index]);
if (diff_byte < static_cast<uint8_t>(0xff) &&
diff_byte + 1 < static_cast<uint8_t>(limit[diff_index])) {
// After the increment and the truncation at diff_index, the new start is greater than the old start but still below limit (guaranteed by diff_byte + 1 < limit[diff_index])
// In other words: find a new start inside (old start, limit) that is shorter than the old start
(*start)[diff_index]++;
// Truncate start right after the incremented byte
start->resize(diff_index + 1);
assert(Compare(*start, limit) < 0);
}
}
// Overall: find a new, shorter start in [start, limit) (used to shrink index keys); if that is not possible, leave start unchanged
}
void FindShortSuccessor(std::string* key) const override {
// Find first character that can be incremented
size_t n = key->size();
for (size_t i = 0; i < n; i++) {
const uint8_t byte = (*key)[i];
// Find the first byte that is not 0xff, increment it, and truncate right after it: that is the shortest successor
if (byte != static_cast<uint8_t>(0xff)) {
(*key)[i] = byte + 1;
key->resize(i + 1);
return;
}
}
// *key is a run of 0xffs. Leave it alone.
}
};
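A worked example of the two helpers, using the default comparator (BytewiseComparator() from leveldb/comparator.h; the strings are made up):
#include "leveldb/comparator.h"
#include "leveldb/slice.h"

#include <cassert>
#include <string>

void ComparatorDemo() {
  const leveldb::Comparator* cmp = leveldb::BytewiseComparator();

  std::string start = "abcdefghij";
  cmp->FindShortestSeparator(&start, leveldb::Slice("abcxyz"));
  // Common prefix "abc"; 'd' + 1 = 'e' is still below 'x', so start shrinks
  // to "abce", which sorts at or after the old start and before "abcxyz".
  assert(start == "abce");

  std::string key = "ab\xff\xff";
  cmp->FindShortSuccessor(&key);
  // The first byte that is not 0xff is 'a'; it becomes 'b' and the rest is dropped.
  assert(key == "b");
}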
CRC32
Hardware-accelerated CRC32C, with a fallback to a pure software implementation
// Determine if the CPU running this program can accelerate the CRC32C
// calculation.
static bool CanAccelerateCRC32C() {
// port::AcceleratedCRC32C returns zero when unable to accelerate.
static const char kTestCRCBuffer[] = "TestCRCBuffer";
static const char kBufSize = sizeof(kTestCRCBuffer) - 1;
static const uint32_t kTestCRCValue = 0xdcbc59fa;
// Amusingly, the capability check is just: run it once and see whether the answer comes out right...
return port::AcceleratedCRC32C(0, kTestCRCBuffer, kBufSize) == kTestCRCValue;
}
// The main entry point
uint32_t Extend(uint32_t crc, const char* data, size_t n) {
static bool accelerate = CanAccelerateCRC32C();
if (accelerate) {
return port::AcceleratedCRC32C(crc, data, n);
}
const uint8_t* p = reinterpret_cast<const uint8_t*>(data);
... what follows is a stretch of odd macros, bit twiddling, and cryptic lookup tables
that I cannot make much sense of
Env
A unified abstraction of the environment LevelDB runs on.
helpers/memenv contains an in-memory Env implementation, used for testing.
It provides the main file-system abstractions:
- FileLock
- Logger
- RandomAccessFile (read-only)
- SequentialFile (read-only)
- WritableFile
- Directory Operations
And the main scheduling operations:
- Schedule()
- StartThread()
- NowMicros()
- SleepForMicroseconds()
Plus logging:
- Logger
The concrete implementations are numerous and fairly trivial, so they are skipped here.
Histogram
A histogram, used for benchmarks
- Header
class Histogram {
public:
Histogram() {}
~Histogram() {}
void Clear();
void Add(double value);
void Merge(const Histogram& other);
std::string ToString() const;
private:
enum { kNumBuckets = 154 };
double Median() const;
double Percentile(double p) const;
double Average() const;
double StandardDeviation() const;
static const double kBucketLimit[kNumBuckets];
double min_;
double max_;
double num_;
double sum_;
double sum_squares_;
double buckets_[kNumBuckets];
};
- Implementation
// Bucket limits grow non-uniformly
const double Histogram::kBucketLimit[kNumBuckets] = {
1,
2,
3,
4,
5,
6,
...,
8000000000.0,
9000000000.0,
1e200,
};
void Histogram::Clear() {
// Initialize min_ to the largest bucket limit
min_ = kBucketLimit[kNumBuckets - 1];
// Zero everything else
max_ = 0;
num_ = 0;
sum_ = 0;
sum_squares_ = 0;
for (int i = 0; i < kNumBuckets; i++) {
buckets_[i] = 0;
}
}
void Histogram::Add(double value) {
// Too lazy to compute the bucket index directly; just do a linear search
// Linear search is fast enough for our usage in db_bench
int b = 0;
while (b < kNumBuckets - 1 && kBucketLimit[b] <= value) {
b++;
}
buckets_[b] += 1.0;
if (min_ > value) min_ = value;
if (max_ < value) max_ = value;
num_++;
sum_ += value;
sum_squares_ += (value * value);
}
// Merge another histogram into this one
void Histogram::Merge(const Histogram& other) {
if (other.min_ < min_) min_ = other.min_;
if (other.max_ > max_) max_ = other.max_;
num_ += other.num_;
sum_ += other.sum_;
sum_squares_ += other.sum_squares_;
for (int b = 0; b < kNumBuckets; b++) {
buckets_[b] += other.buckets_[b];
}
}
double Histogram::Median() const { return Percentile(50.0); }
// Roughly what value the p-th percentile falls at
double Histogram::Percentile(double p) const {
// How many samples make up p% of the total
double threshold = num_ * (p / 100.0);
double sum = 0;
for (int b = 0; b < kNumBuckets; b++) {
sum += buckets_[b];
if (sum >= threshold) {
// Scale linearly within this bucket
double left_point = (b == 0) ? 0 : kBucketLimit[b - 1];
double right_point = kBucketLimit[b];
double left_sum = sum - buckets_[b];
double right_sum = sum;
double pos = (threshold - left_sum) / (right_sum - left_sum);
double r = left_point + (right_point - left_point) * pos;
// Clamp into the observed [min_, max_] range
if (r < min_) r = min_;
if (r > max_) r = max_;
return r;
}
}
return max_;
}
double Histogram::Average() const {
if (num_ == 0.0) return 0;
return sum_ / num_;
}
// Standard deviation
double Histogram::StandardDeviation() const {
if (num_ == 0.0) return 0;
double variance = (sum_squares_ * num_ - sum_ * sum_) / (num_ * num_);
return sqrt(variance);
}
// Render the histogram as a string
std::string Histogram::ToString() const {
std::string r;
char buf[200];
std::snprintf(buf, sizeof(buf), "Count: %.0f Average: %.4f StdDev: %.2f
",
num_, Average(), StandardDeviation());
r.append(buf);
std::snprintf(buf, sizeof(buf), "Min: %.4f Median: %.4f Max: %.4f
",
(num_ == 0.0 ? 0.0 : min_), Median(), max_);
r.append(buf);
r.append("------------------------------------------------------
");
const double mult = 100.0 / num_;
double sum = 0;
for (int b = 0; b < kNumBuckets; b++) {
if (buckets_[b] <= 0.0) continue;
sum += buckets_[b];
std::snprintf(buf, sizeof(buf), "[ %7.0f, %7.0f ) %7.0f %7.3f%% %7.3f%% ",
((b == 0) ? 0.0 : kBucketLimit[b - 1]), // left
kBucketLimit[b], // right
buckets_[b], // count
mult * buckets_[b], // percentage
mult * sum); // cumulative percentage
r.append(buf);
// Add hash marks based on percentage; 20 marks for 100%.
int marks = static_cast<int>(20 * (buckets_[b] / num_) + 0.5);
r.append(marks, '#');
r.push_back('\n');
}
return r;
}
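A usage sketch (Histogram is an internal helper in util/histogram.h, mainly used by db_bench; the values here are made up):
#include "util/histogram.h"

#include <cstdio>

void HistogramDemo() {
  leveldb::Histogram hist;
  hist.Clear();  // the constructor does not initialize the members
  for (int i = 0; i < 1000; i++) {
    hist.Add(static_cast<double>(i % 100));  // e.g. per-op latency in micros
  }
  // Prints count/average/stddev plus one line (with # marks) per non-empty bucket.
  std::printf("%s", hist.ToString().c_str());
}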
Logging
Log formatting helpers
Trivial; nothing much to see here
NoDestructor
Suppresses destruction of the wrapped object
- Header
// Wraps an instance whose destructor is never called.
//
// This is intended for use with function-level static variables.
template <typename InstanceType>
class NoDestructor {
public:
// explicit; the constructor arguments are perfect-forwarded to InstanceType
template <typename... ConstructorArgTypes>
explicit NoDestructor(ConstructorArgTypes&&... constructor_args) {
// The backing storage must be large enough to hold the instance
static_assert(sizeof(instance_storage_) >= sizeof(InstanceType),
"instance_storage_ is not large enough to hold the instance");
// Its alignment must also satisfy the instance's alignment requirement
static_assert(
alignof(decltype(instance_storage_)) >= alignof(InstanceType),
"instance_storage_ does not meet the instance's alignment requirement");
// Placement new: new (dst) Type(...) does not allocate; it constructs the object directly inside dst
new (&instance_storage_)
InstanceType(std::forward<ConstructorArgTypes>(constructor_args)...);
}
// Defaulted destructor: InstanceType's destructor is never called
~NoDestructor() = default;
// Not copyable; can only be passed by reference
NoDestructor(const NoDestructor&) = delete;
NoDestructor& operator=(const NoDestructor&) = delete;
InstanceType* get() {
return reinterpret_cast<InstanceType*>(&instance_storage_);
}
private:
// std::aligned_storage<size, align>::type is a type whose storage satisfies the given size and alignment
// typename is needed because this is a dependent type
typename std::aligned_storage<sizeof(InstanceType),
alignof(InstanceType)>::type instance_storage_;
};
- How it is tested
struct DoNotDestruct {
public:
DoNotDestruct(uint32_t a, uint64_t b) : a(a), b(b) {}
~DoNotDestruct() { std::abort(); } // Abort the process if the destructor ever runs
// Used to check constructor argument forwarding.
uint32_t a;
uint64_t b;
};
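This is the pattern it exists for: function-level static singletons that are built on first use and intentionally never destroyed, sidestepping destruction-order problems at process exit. util/comparator.cc uses it roughly like this (reproduced from memory, so treat it as a sketch):
const Comparator* BytewiseComparator() {
  static NoDestructor<BytewiseComparatorImpl> singleton;
  return singleton.get();
}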
Options
Options, specified by the user to configure the database
The available knobs include (a small usage sketch follows this list):
- CompressionType: defaults to Snappy
- NoCompression
- Snappy
- Zstd
- compression level
- Comparator: defaults to the bytewise comparator, which orders keys byte by byte
- Boolean switches
- create_if_missing: create the DB if it does not exist
- error_if_exists: fail if the DB already exists
- paranoid_checks: aggressively verify data and fail fast on corruption
- reuse_logs: experimental; reuse existing log files
- verify_checksums: verify checksums on reads, default false
- fill_cache: whether reads may populate the block cache, default true
- sync: fsync before a write returns, default false
- Env: defaults to the detected host environment
- Logger
- Block Cache
- (nullptr by default; LevelDB then creates an internal 8 MB cache, which is the sharded LRU above)
- FilterPolicy: filters Gets for keys that are not present, reducing disk reads
- (nullptr by default; NewBloomFilterPolicy is the built-in choice)
- Numeric options
- write_buffer_size: how much data to buffer in the memtable before it is flushed
- max_open_files
- (default 1000; the default per-process fd limit on Linux is 1024)
- block_size: the approximate size of a block before compression; can be changed dynamically
- (larger blocks favor sequential scans)
- (smaller blocks favor random reads)
- block_restart_interval:
- (see the table format notes; default 16)
- max_file_size
- (worth tuning based on how the file system handles many files in one directory)
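A small sketch of setting a few of these options when opening a database (the sizes are arbitrary; the caller keeps ownership of block_cache and filter_policy and must delete them after the DB is closed):
#include "leveldb/cache.h"
#include "leveldb/db.h"
#include "leveldb/filter_policy.h"
#include "leveldb/options.h"

#include <string>

leveldb::DB* OpenTunedDb(const std::string& path) {
  leveldb::Options options;
  options.create_if_missing = true;
  options.compression = leveldb::kSnappyCompression;      // the default
  options.write_buffer_size = 8 << 20;                    // 8 MB memtable
  options.block_cache = leveldb::NewLRUCache(64 << 20);   // 64 MB block cache
  options.filter_policy = leveldb::NewBloomFilterPolicy(10);

  leveldb::DB* db = nullptr;
  leveldb::Status s = leveldb::DB::Open(options, path, &db);
  return s.ok() ? db : nullptr;
}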
Random
A very simple random number generator; no guarantees about quality
// A very simple random number generator. Not especially good at
// generating truly random bits, but good enough for our needs in this
// package.
class Random {
private:
uint32_t seed_;
public:
explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) {
// Avoid bad seeds.
if (seed_ == 0 || seed_ == 2147483647L) {
seed_ = 1;
}
}
// seed = (seed * A) % M
uint32_t Next() {
static const uint32_t M = 2147483647L; // 2^31-1
static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0
// We are computing
// seed_ = (seed_ * A) % M, where M = 2^31-1
//
// seed_ must not be zero or M, or else all subsequent computed values
// will be zero or M respectively. For all other values, seed_ will end
// up cycling through every number in [1,M-1]
uint64_t product = seed_ * A;
// Compute (product % M) using the fact that ((x << 31) % M) == x.
seed_ = static_cast<uint32_t>((product >> 31) + (product & M));
// The first reduction may overflow by 1 bit, so we may need to
// repeat. mod == M is not possible; using > allows the faster
// sign-bit-based test.
if (seed_ > M) {
seed_ -= M;
}
return seed_;
}
// Returns a uniformly distributed value in the range [0..n-1]
// REQUIRES: n > 0
uint32_t Uniform(int n) { return Next() % n; }
// Randomly returns true ~"1/n" of the time, and false otherwise.
// REQUIRES: n > 0
bool OneIn(int n) { return (Next() % n) == 0; }
// Skewed: pick "base" uniformly from range [0,max_log] and then
// return "base" random bits. The effect is to pick a number in the
// range [0,2^max_log-1] with exponential bias towards smaller numbers.
uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); }
};
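A quick sketch of the helpers (assuming the internal header util/random.h; the seed is arbitrary):
#include "util/random.h"

#include <cstdint>

void RandomDemo() {
  leveldb::Random rnd(301);         // arbitrary nonzero seed
  uint32_t u = rnd.Uniform(100);    // uniform in [0, 99]
  bool one_in_ten = rnd.OneIn(10);  // true roughly 10% of the time
  uint32_t s = rnd.Skewed(10);      // in [0, 1023], biased toward small values
  (void)u;
  (void)one_in_ten;
  (void)s;
}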
Status
A status value, made up of a code plus a message (a small usage sketch follows the list of codes)
Code:
- OK
- NotFound
- Corruption
- NotSupported
- InvalidArgument
- IOError
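A usage sketch of the public Status API from leveldb/status.h (FindUser is a made-up function):
#include "leveldb/status.h"

#include <cstdio>
#include <string>

leveldb::Status FindUser(const std::string& name, std::string* out) {
  if (name.empty()) {
    return leveldb::Status::InvalidArgument("empty user name");
  }
  if (name != "admin") {
    return leveldb::Status::NotFound("no such user", name);  // code + message
  }
  *out = "admin profile";
  return leveldb::Status::OK();
}

void StatusDemo() {
  std::string profile;
  leveldb::Status s = FindUser("guest", &profile);
  if (s.IsNotFound()) {
    std::printf("%s\n", s.ToString().c_str());  // "NotFound: no such user: guest"
  }
}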
Iterator
- The main abstraction over LevelDB readers
- Bidirectional
- Carries a linked list of cleanup functions, used to release resources when the reader is destroyed
Definition
class LEVELDB_EXPORT Iterator {
public:
Iterator();
Iterator(const Iterator&) = delete;
Iterator& operator=(const Iterator&) = delete;
virtual ~Iterator();
// An iterator is either positioned at a key/value pair, or
// not valid. This method returns true iff the iterator is valid.
virtual bool Valid() const = 0;
// Position at the first key in the source. The iterator is Valid()
// after this call iff the source is not empty.
virtual void SeekToFirst() = 0;
// Position at the last key in the source. The iterator is
// Valid() after this call iff the source is not empty.
virtual void SeekToLast() = 0;
// Position at the first key in the source that is at or past target.
// The iterator is Valid() after this call iff the source contains
// an entry that comes at or past target.
virtual void Seek(const Slice& target) = 0;
// Moves to the next entry in the source. After this call, Valid() is
// true iff the iterator was not positioned at the last entry in the source.
// REQUIRES: Valid()
virtual void Next() = 0;
// Moves to the previous entry in the source. After this call, Valid() is
// true iff the iterator was not positioned at the first entry in source.
// REQUIRES: Valid()
virtual void Prev() = 0;
// Return the key for the current entry. The underlying storage for
// the returned slice is valid only until the next modification of
// the iterator.
// REQUIRES: Valid()
virtual Slice key() const = 0;
// Return the value for the current entry. The underlying storage for
// the returned slice is valid only until the next modification of
// the iterator.
// REQUIRES: Valid()
virtual Slice value() const = 0;
// If an error has occurred, return it. Else return an ok status.
virtual Status status() const = 0;
// Clients are allowed to register function/arg1/arg2 triples that
// will be invoked when this iterator is destroyed.
//
// Note that unlike all of the preceding methods, this method is
// not abstract and therefore clients should not override it.
// Cleanup hooks
using CleanupFunction = void (*)(void* arg1, void* arg2);
void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
private:
// Cleanup functions are stored in a single-linked list.
// The list's head node is inlined in the iterator.
struct CleanupNode {
// True if the node is not used. Only head nodes might be unused.
bool IsEmpty() const { return function == nullptr; }
// Invokes the cleanup function.
void Run() {
assert(function != nullptr);
(*function)(arg1, arg2);
}
// The head node is used if the function pointer is not null.
CleanupFunction function;
void* arg1;
void* arg2;
CleanupNode* next;
};
// The cleanup hooks are chained together in a linked list
CleanupNode cleanup_head_;
};
// Return an empty iterator (yields nothing).
// Calling Next/Prev on it trips an assert
LEVELDB_EXPORT Iterator* NewEmptyIterator();
// Return an empty iterator with the specified status.
// Lets callers report an error through an iterator, which keeps some method signatures simple
LEVELDB_EXPORT Iterator* NewErrorIterator(const Status& status);
Implementation
Iterator::Iterator() {
cleanup_head_.function = nullptr;
cleanup_head_.next = nullptr;
}
Iterator::~Iterator() {
// Run the cleanup chain on destruction
if (!cleanup_head_.IsEmpty()) {
cleanup_head_.Run();
for (CleanupNode* node = cleanup_head_.next; node != nullptr;) {
node->Run();
CleanupNode* next_node = node->next;
delete node;
node = next_node;
}
}
}
// Register a cleanup hook; this is the default implementation and should not be overridden
void Iterator::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) {
assert(func != nullptr);
CleanupNode* node;
if (cleanup_head_.IsEmpty()) {
node = &cleanup_head_;
} else {
node = new CleanupNode();
node->next = cleanup_head_.next;
cleanup_head_.next = node;
}
node->function = func;
node->arg1 = arg1;
node->arg2 = arg2;
}
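A sketch of how a cleanup hook is registered (the buffer here is made up; inside LevelDB the same mechanism is used, for example, to release cached table blocks once the iterator reading them is destroyed):
#include "leveldb/iterator.h"

// Cleanup hooks have the signature void(void*, void*).
static void ReleaseBuffer(void* arg1, void* /*arg2*/) {
  delete[] static_cast<char*>(arg1);
}

void AttachCleanup(leveldb::Iterator* it) {
  char* scratch = new char[4096];
  // Runs exactly once, when the iterator is deleted.
  it->RegisterCleanup(&ReleaseBuffer, scratch, nullptr);
}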