inline char* Arena::Allocate(size_t bytes) {
  // The semantics of what to return are a bit messy if we allow
  // 0-byte allocations, so we disallow them here (we don't need
  // them for our internal use).
  assert(bytes > 0);
  if (bytes <= alloc_bytes_remaining_) {
    char* result = alloc_ptr_;
    alloc_ptr_ += bytes;
    alloc_bytes_remaining_ -= bytes;
    return result;
  }
  return AllocateFallback(bytes);
}

char* Arena::AllocateFallback(size_t bytes) {
  if (bytes > kBlockSize / 4) {
    // Object is more than a quarter of our block size. Allocate it separately
    // to avoid wasting too much space in leftover bytes.
    // Anything larger than 1/4 of a block gets its own exact-size block, avoiding waste.
    char* result = AllocateNewBlock(bytes);
    return result;
  }
  // We waste the remaining space in the current block.
  alloc_ptr_ = AllocateNewBlock(kBlockSize);
  alloc_bytes_remaining_ = kBlockSize;

  char* result = alloc_ptr_;
  alloc_ptr_ += bytes;
  alloc_bytes_remaining_ -= bytes;
  return result;
}
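AllocateNewBlock() is not shown above; a minimal sketch consistent with these call sites, assuming the arena owns its blocks through a std::vector<char*> named blocks_ and tracks usage in an atomic memory_usage_ counter, as in LevelDB's util/arena.cc:

char* Arena::AllocateNewBlock(size_t block_bytes) {
  char* result = new char[block_bytes];
  blocks_.push_back(result);  // owned by the arena, freed in ~Arena()
  // Account for the block plus the vector's pointer to it.
  memory_usage_.fetch_add(block_bytes + sizeof(char*),
                          std::memory_order_relaxed);
  return result;
}

Because blocks are only appended and freed all at once in the destructor, individual allocations never need to be freed, which is what makes the bump-pointer fast path in Allocate() so cheap.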
  Node* NewNode(const Key& key, int height);
  int RandomHeight();
  bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
  // Return true if key is greater than the data stored in "n"
  bool KeyIsAfterNode(const Key& key, Node* n) const;
  // Return the earliest node that comes at or after key.
  // Return nullptr if there is no such node.
  //
  // If prev is non-null, fills prev[level] with pointer to previous
  // node at "level" for every level in [0..max_height_-1].
  Node* FindGreaterOrEqual(const Key& key, Node** prev) const;
  // Return the latest node with a key < key.
  // Return head_ if there is no such node.
  Node* FindLessThan(const Key& key) const;
  // Return the last node in the list.
  // Return head_ if list is empty.
  Node* FindLast() const;
  Comparator const compare_;
  Arena* const arena_;  // Arena used for allocations of nodes
  Node* const head_;    // Sentinel node; never holds a user key
  std::atomic<int> max_height_;  // Height of the entire list
  Random rnd_;  // Random source for RandomHeight()
};
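RandomHeight() is only declared above. A sketch of the usual implementation, matching LevelDB's geometric distribution where each additional level is kept with probability 1/4 (kBranching is the constant name used in LevelDB; Random::OneIn comes from util/random.h):

template <typename Key, class Comparator>
int SkipList<Key, Comparator>::RandomHeight() {
  // Increase height with probability 1 in kBranching.
  static const unsigned int kBranching = 4;
  int height = 1;
  while (height < kMaxHeight && rnd_.OneIn(kBranching)) {
    height++;
  }
  assert(height > 0);
  assert(height <= kMaxHeight);
  return height;
}

With kBranching = 4, the expected number of nodes of height >= h shrinks by 4x per level, so a kMaxHeight of 12 comfortably covers lists with millions of keys.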
template <typename Key, class Comparator>
void SkipList<Key, Comparator>::Insert(const Key& key) {
  // TODO(opt): We can use a barrier-free variant of FindGreaterOrEqual()
  // here since Insert() is externally synchronized.
  Node* prev[kMaxHeight];
  Node* x = FindGreaterOrEqual(key, prev);
  // Our data structure does not allow duplicate insertion
  assert(x == nullptr || !Equal(key, x->key));
  int height = RandomHeight();
  if (height > GetMaxHeight()) {
    // Raise the current max height; the predecessor at each new level is head_.
    for (int i = GetMaxHeight(); i < height; i++) {
      prev[i] = head_;
    }
    max_height_.store(height, std::memory_order_relaxed);
  }
  // Create the new node and splice it in at every level.
  x = NewNode(key, height);
  for (int i = 0; i < height; i++) {
    // NoBarrier_SetNext() suffices since we will add a barrier when
    // we publish a pointer to "x" in prev[i].
    x->NoBarrier_SetNext(i, prev[i]->NoBarrier_Next(i));
    prev[i]->SetNext(i, x);
  }
}
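Insert() relies on FindGreaterOrEqual() to fill prev[] with the per-level predecessors of the insertion point. A sketch of that search, consistent with the declaration above (Node::Next and GetMaxHeight are assumed to behave as in LevelDB's db/skiplist.h, and KeyIsAfterNode() treats a nullptr node as "not after"):

template <typename Key, class Comparator>
typename SkipList<Key, Comparator>::Node*
SkipList<Key, Comparator>::FindGreaterOrEqual(const Key& key,
                                              Node** prev) const {
  Node* x = head_;
  int level = GetMaxHeight() - 1;
  while (true) {
    Node* next = x->Next(level);
    if (KeyIsAfterNode(key, next)) {
      x = next;  // Keep searching at this level
    } else {
      if (prev != nullptr) prev[level] = x;
      if (level == 0) {
        return next;
      } else {
        level--;  // Drop down one level and continue
      }
    }
  }
}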
  if (status.ok() && updates != nullptr) {  // nullptr batch is for compactions
    WriteBatch* write_batch = BuildBatchGroup(&last_writer);
    WriteBatchInternal::SetSequence(write_batch, last_sequence + 1);
    last_sequence += WriteBatchInternal::Count(write_batch);
    // Add to log and apply to memtable. We can release the lock
    // during this phase since &w is currently responsible for logging
    // and protects against concurrent loggers and concurrent writes
    // into mem_.
    {
      mutex_.Unlock();
      // Write the log first
      status = log_->AddRecord(WriteBatchInternal::Contents(write_batch));
      bool sync_error = false;
      if (status.ok() && options.sync) {
        // If the sync option is set, flush the log file to disk
        status = logfile_->Sync();
        if (!status.ok()) {
          sync_error = true;
        }
      }
      if (status.ok()) {
        // Then apply the batch to the memtable
        status = WriteBatchInternal::InsertInto(write_batch, mem_);
      }
      mutex_.Lock();
    }
    if (write_batch == tmp_batch_) tmp_batch_->Clear();
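For orientation, this internal write path is what runs underneath the public DB::Write() API. A minimal usage sketch; db is assumed to be an already-opened leveldb::DB*:

#include <cassert>
#include "leveldb/db.h"
#include "leveldb/write_batch.h"

void WriteExample(leveldb::DB* db) {
  leveldb::WriteBatch batch;
  batch.Put("key1", "value1");
  batch.Delete("key2");

  leveldb::WriteOptions options;
  options.sync = true;  // triggers the logfile_->Sync() branch above
  leveldb::Status s = db->Write(options, &batch);
  assert(s.ok());
}

With options.sync set, the batch is durable in the log before Write() returns; without it, a machine crash can lose the most recent writes even though the process saw them succeed.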