Mirror of https://github.com/ton-blockchain/ton, synced 2025-03-09 15:40:10 +00:00

Commit c2da007f40: initial commit
1610 changed files with 398047 additions and 0 deletions
tddb/CMakeLists.txt (new file, 71 lines)
@@ -0,0 +1,71 @@
cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)

#SOURCE SETS
set(TDDB_SOURCE
  td/db/MemoryKeyValue.cpp

  td/db/KeyValue.h
  td/db/KeyValueAsync.h
  td/db/MemoryKeyValue.h

  td/db/utils/ChainBuffer.cpp
  td/db/utils/CyclicBuffer.cpp
  td/db/utils/FileSyncState.cpp
  td/db/utils/StreamInterface.cpp
  td/db/utils/StreamToFileActor.cpp
  td/db/utils/FileToStreamActor.cpp

  td/db/utils/ChainBuffer.h
  td/db/utils/CyclicBuffer.h
  td/db/utils/FileSyncState.h
  td/db/utils/StreamInterface.h
  td/db/utils/StreamToFileActor.h
  td/db/utils/FileToStreamActor.h

  td/db/binlog/Binlog.cpp
  td/db/binlog/BinlogReaderHelper.cpp

  td/db/binlog/Binlog.h
  td/db/binlog/BinlogReaderHelper.h
  td/db/binlog/BinlogReaderInterface.h
)

set(TDDB_ROCKSDB_SOURCE
  td/db/RocksDb.cpp
  td/db/RocksDb.h
)

set(TDDB_TEST_SOURCE
  ${CMAKE_CURRENT_SOURCE_DIR}/test/key_value.cpp
  ${CMAKE_CURRENT_SOURCE_DIR}/test/binlog.cpp
  PARENT_SCOPE
)

#RULES

#LIBRARIES

add_library(tddb STATIC ${TDDB_SOURCE})
target_include_directories(tddb PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
target_link_libraries(tddb PUBLIC tdutils tdactor)

if (TDDB_USE_ROCKSDB)
  target_sources(tddb PRIVATE ${TDDB_ROCKSDB_SOURCE})
  target_compile_definitions(tddb PUBLIC -DTDDB_USE_ROCKSDB)
  target_link_libraries(tddb PRIVATE rocksdb)
  target_include_directories(tddb PRIVATE $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../third-party/rocksdb/include>)
endif()

add_executable(io-bench test/io-bench.cpp)
target_link_libraries(io-bench tdutils tdactor tddb)

# BEGIN-INTERNAL
#add_subdirectory(benchmark)

# END-INTERNAL
install(TARGETS tddb EXPORT TdTargets
  LIBRARY DESTINATION lib
  ARCHIVE DESTINATION lib
  RUNTIME DESTINATION bin
  INCLUDES DESTINATION include
)

tddb/td/db/KeyValue.h (new file, 116 lines)
@@ -0,0 +1,116 @@
/*
    This file is part of TON Blockchain Library.

    TON Blockchain Library is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    TON Blockchain Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.

    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once
#include "td/utils/Status.h"
#include "td/utils/logging.h"
namespace td {
class KeyValueReader {
 public:
  virtual ~KeyValueReader() = default;
  enum class GetStatus : int32 { Ok, NotFound };

  virtual Result<GetStatus> get(Slice key, std::string &value) = 0;
  virtual Result<size_t> count(Slice prefix) = 0;
};

class PrefixedKeyValueReader : public KeyValueReader {
 public:
  PrefixedKeyValueReader(std::shared_ptr<KeyValueReader> reader, Slice prefix)
      : reader_(std::move(reader)), prefix_(prefix.str()) {
  }
  Result<GetStatus> get(Slice key, std::string &value) override {
    return reader_->get(PSLICE() << prefix_ << key, value);
  }
  Result<size_t> count(Slice prefix) override {
    return reader_->count(PSLICE() << prefix_ << prefix);
  }

 private:
  std::shared_ptr<KeyValueReader> reader_;
  std::string prefix_;
};

class KeyValueUtils {
 public:
};

class KeyValue : public KeyValueReader {
 public:
  virtual Status set(Slice key, Slice value) = 0;
  virtual Status erase(Slice key) = 0;

  virtual Status begin_transaction() = 0;
  virtual Status commit_transaction() = 0;
  virtual Status abort_transaction() = 0;
  // Destructor will abort the transaction

  virtual std::unique_ptr<KeyValueReader> snapshot() = 0;

  virtual std::string stats() const {
    return "";
  }
  virtual Status flush() {
    return Status::OK();
  }
};
class PrefixedKeyValue : public KeyValue {
 public:
  PrefixedKeyValue(std::shared_ptr<KeyValue> kv, Slice prefix) : kv_(std::move(kv)), prefix_(prefix.str()) {
  }
  Result<GetStatus> get(Slice key, std::string &value) override {
    return kv_->get(PSLICE() << prefix_ << key, value);
  }
  Result<size_t> count(Slice prefix) override {
    return kv_->count(PSLICE() << prefix_ << prefix);
  }
  Status set(Slice key, Slice value) override {
    return kv_->set(PSLICE() << prefix_ << key, value);
  }
  Status erase(Slice key) override {
    return kv_->erase(PSLICE() << prefix_ << key);
  }

  Status begin_transaction() override {
    return kv_->begin_transaction();
  }
  Status commit_transaction() override {
    return kv_->commit_transaction();
  }
  Status abort_transaction() override {
    return kv_->abort_transaction();
  }
  // Destructor will abort the transaction

  std::unique_ptr<KeyValueReader> snapshot() override {
    return kv_->snapshot();
  }

  std::string stats() const override {
    return kv_->stats();
  }
  Status flush() override {
    return kv_->flush();
  }

 private:
  std::shared_ptr<KeyValue> kv_;
  std::string prefix_;
};

}  // namespace td

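The KeyValueReader/KeyValue split above gives a read-only view plus a writable store with coarse transactions, and PrefixedKeyValue simply namespaces keys by concatenating a fixed prefix. A minimal usage sketch (not part of the commit; it assumes the MemoryKeyValue implementation added later in this commit):

#include "td/db/KeyValue.h"
#include "td/db/MemoryKeyValue.h"

// Sketch: wrap a concrete KeyValue in a PrefixedKeyValue so every key is
// transparently stored under the "acc." namespace of the underlying store.
td::Status prefixed_kv_demo() {
  auto base = std::make_shared<td::MemoryKeyValue>();
  td::PrefixedKeyValue kv(base, "acc.");

  TRY_STATUS(kv.set("alice", "1"));  // stored as "acc.alice" in `base`
  TRY_STATUS(kv.set("bob", "2"));

  std::string value;
  TRY_RESULT(status, kv.get("alice", value));
  CHECK(status == td::KeyValue::GetStatus::Ok && value == "1");

  TRY_RESULT(n, base->count("acc."));  // both keys share the prefix
  CHECK(n == 2);
  return td::Status::OK();
}
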
tddb/td/db/KeyValueAsync.h (new file, 158 lines)
@@ -0,0 +1,158 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once

#include "td/actor/actor.h"
#include "td/actor/PromiseFuture.h"

#include "td/db/KeyValue.h"

namespace td {

template <class KeyT, class ValueT>
class KeyValueActor;

template <class KeyT, class ValueT>
class KeyValueAsync {
 public:
  using ActorType = KeyValueActor<KeyT, ValueT>;
  struct GetResult {
    KeyValue::GetStatus status;
    ValueT value;
  };
  KeyValueAsync(std::shared_ptr<KeyValue> key_value);
  void get(KeyT key, Promise<GetResult> promise = {});
  void set(KeyT key, ValueT value, Promise<Unit> promise = {}, double sync_delay = 0);
  void erase(KeyT key, Promise<Unit> promise = {}, double sync_delay = 0);

  KeyValueAsync();
  KeyValueAsync(KeyValueAsync &&);
  KeyValueAsync &operator=(KeyValueAsync &&);
  ~KeyValueAsync();

 private:
  actor::ActorOwn<ActorType> actor_;
};

template <class KeyT, class ValueT>
class KeyValueActor : public actor::Actor {
 public:
  KeyValueActor(std::shared_ptr<KeyValue> key_value) : key_value_(std::move(key_value)) {
  }

  void get(KeyT key, Promise<typename KeyValueAsync<KeyT, ValueT>::GetResult> promise) {
    std::string value;
    auto r_status = key_value_->get(as_slice(key), value);
    if (r_status.is_error()) {
      promise.set_error(r_status.move_as_error());
      return;
    }
    typename KeyValueAsync<KeyT, ValueT>::GetResult result;
    result.status = r_status.move_as_ok();
    if (result.status == KeyValue::GetStatus::Ok) {
      result.value = ValueT(std::move(value));
    }
    promise.set_value(std::move(result));
  }
  void set(KeyT key, ValueT value, Promise<Unit> promise, double sync_delay) {
    schedule_sync(std::move(promise), sync_delay);
    key_value_->set(as_slice(key), as_slice(value));
  }
  void erase(KeyT key, Promise<Unit> promise, double sync_delay) {
    schedule_sync(std::move(promise), sync_delay);
    key_value_->erase(as_slice(key));
  }

 private:
  std::shared_ptr<KeyValue> key_value_;
  std::vector<Promise<Unit>> pending_promises_;
  bool need_sync_ = false;
  bool sync_active_ = false;

  void tear_down() override {
    sync();
  }
  void sync() {
    if (!need_sync_) {
      return;
    }
    need_sync_ = false;
    sync_active_ = false;
    key_value_->commit_transaction();
    for (auto &promise : pending_promises_) {
      promise.set_value(Unit());
    }
    pending_promises_.clear();
  }
  void schedule_sync(Promise<Unit> promise, double sync_delay) {
    if (!need_sync_) {
      key_value_->begin_transaction();
      need_sync_ = true;
    }

    if (!sync_active_) {
      if (sync_delay == 0) {
        send_sync();
      } else {
        alarm_timestamp().relax(Timestamp::in(sync_delay));
      }
    }
    if (promise) {
      pending_promises_.push_back(std::move(promise));
    }
  }
  void alarm() override {
    if (need_sync_ && !sync_active_) {
      send_sync();
    }
  }
  void send_sync() {
    sync_active_ = true;
    alarm_timestamp() = Timestamp::never();
    send_closure(actor_id(this), &KeyValueActor<KeyT, ValueT>::sync);
  }
};

template <class KeyT, class ValueT>
KeyValueAsync<KeyT, ValueT>::KeyValueAsync() = default;
template <class KeyT, class ValueT>
KeyValueAsync<KeyT, ValueT>::KeyValueAsync(KeyValueAsync &&) = default;
template <class KeyT, class ValueT>
KeyValueAsync<KeyT, ValueT> &KeyValueAsync<KeyT, ValueT>::operator=(KeyValueAsync &&) = default;
template <class KeyT, class ValueT>
KeyValueAsync<KeyT, ValueT>::~KeyValueAsync() = default;

template <class KeyT, class ValueT>
KeyValueAsync<KeyT, ValueT>::KeyValueAsync(std::shared_ptr<KeyValue> key_value) {
  actor_ = actor::create_actor<ActorType>("KeyValueActor", std::move(key_value));
}
template <class KeyT, class ValueT>
void KeyValueAsync<KeyT, ValueT>::get(KeyT key, Promise<GetResult> promise) {
  send_closure_later(actor_, &ActorType::get, std::move(key), std::move(promise));
}
template <class KeyT, class ValueT>
void KeyValueAsync<KeyT, ValueT>::set(KeyT key, ValueT value, Promise<Unit> promise, double sync_delay) {
  send_closure_later(actor_, &ActorType::set, std::move(key), std::move(value), std::move(promise), sync_delay);
}
template <class KeyT, class ValueT>
void KeyValueAsync<KeyT, ValueT>::erase(KeyT key, Promise<Unit> promise, double sync_delay) {
  send_closure_later(actor_, &ActorType::erase, std::move(key), std::move(promise), sync_delay);
}

}  // namespace td

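KeyValueAsync hides a KeyValueActor that batches writes into a single transaction and commits either immediately (sync_delay == 0) or on an alarm, resolving all pending promises once the commit lands. A hedged sketch of the intended call pattern, assuming it runs inside a tdactor scheduler and wraps a transactional KeyValue such as RocksDb; std::string is used for both key and value because as_slice(std::string) is available:

#include "td/db/KeyValueAsync.h"

// Sketch only; not part of the commit.
using KV = td::KeyValueAsync<std::string, std::string>;

void store_value(KV &kv, std::string key, std::string value) {
  // Writes issued within the same small sync_delay window share one commit.
  kv.set(std::move(key), std::move(value),
         td::PromiseCreator::lambda([](td::Result<td::Unit> r) {
           // resolved only after the surrounding transaction has been committed
         }),
         /*sync_delay=*/0.01);
}

void load_value(KV &kv, std::string key) {
  kv.get(std::move(key), td::PromiseCreator::lambda([](td::Result<KV::GetResult> r) {
           if (r.is_ok() && r.ok().status == td::KeyValue::GetStatus::Ok) {
             // r.ok().value holds the stored bytes
           }
         }));
}
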
tddb/td/db/MemoryKeyValue.cpp (new file, 74 lines)
@@ -0,0 +1,74 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/db/MemoryKeyValue.h"

#include "td/utils/format.h"

namespace td {
Result<MemoryKeyValue::GetStatus> MemoryKeyValue::get(Slice key, std::string &value) {
  auto it = map_.find(key);
  if (it == map_.end()) {
    return GetStatus::NotFound;
  }
  value = it->second;
  return GetStatus::Ok;
}
Status MemoryKeyValue::set(Slice key, Slice value) {
  map_[key.str()] = value.str();
  return Status::OK();
}
Status MemoryKeyValue::erase(Slice key) {
  auto it = map_.find(key);
  if (it != map_.end()) {
    map_.erase(it);
  }
  return Status::OK();
}

Result<size_t> MemoryKeyValue::count(Slice prefix) {
  size_t res = 0;
  for (auto it = map_.lower_bound(prefix); it != map_.end(); it++) {
    if (Slice(it->first).truncate(prefix.size()) != prefix) {
      break;
    }
    res++;
  }
  return res;
}

std::unique_ptr<KeyValueReader> MemoryKeyValue::snapshot() {
  auto res = std::make_unique<MemoryKeyValue>();
  res->map_ = map_;
  return std::move(res);
}

std::string MemoryKeyValue::stats() const {
  return PSTRING() << "MemoryKeyValueStats{" << tag("get_count", get_count_) << "}";
}

Status MemoryKeyValue::begin_transaction() {
  UNREACHABLE();
}
Status MemoryKeyValue::commit_transaction() {
  UNREACHABLE();
}
Status MemoryKeyValue::abort_transaction() {
  UNREACHABLE();
}
}  // namespace td

tddb/td/db/MemoryKeyValue.h (new file, 48 lines)
@@ -0,0 +1,48 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once
#include "td/db/KeyValue.h"

#include <map>

namespace td {
class MemoryKeyValue : public KeyValue {
 public:
  Result<GetStatus> get(Slice key, std::string &value) override;
  Status set(Slice key, Slice value) override;
  Status erase(Slice key) override;
  Result<size_t> count(Slice prefix) override;

  Status begin_transaction() override;
  Status commit_transaction() override;
  Status abort_transaction() override;

  std::unique_ptr<KeyValueReader> snapshot() override;

  std::string stats() const override;

 private:
  class Cmp : public std::less<> {
   public:
    using is_transparent = void;
  };
  std::map<std::string, std::string, Cmp> map_;
  int64 get_count_{0};
};
}  // namespace td

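MemoryKeyValue is the simplest backend: a std::map with a transparent comparator, count() walking entries from lower_bound(prefix) until the prefix stops matching, and snapshot() copying the whole map. A small sketch of that behaviour (not part of the commit; note that the transaction methods are UNREACHABLE here and must not be called):

#include "td/db/MemoryKeyValue.h"

td::Status memory_kv_demo() {
  td::MemoryKeyValue kv;
  TRY_STATUS(kv.set("block.1", "a"));
  TRY_STATUS(kv.set("block.2", "b"));
  TRY_STATUS(kv.set("state.1", "c"));

  TRY_RESULT(blocks, kv.count("block."));  // == 2: lower_bound + prefix check
  CHECK(blocks == 2);

  auto snapshot = kv.snapshot();  // deep copy of the map
  TRY_STATUS(kv.erase("block.1"));

  std::string value;
  TRY_RESULT(status, snapshot->get("block.1", value));
  CHECK(status == td::KeyValue::GetStatus::Ok);  // the snapshot is unaffected by the erase
  return td::Status::OK();
}
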
tddb/td/db/RocksDb.cpp (new file, 199 lines)
@@ -0,0 +1,199 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/db/RocksDb.h"

#include "rocksdb/db.h"
#include "rocksdb/statistics.h"
#include "rocksdb/write_batch.h"
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"

namespace td {
namespace {
static Status from_rocksdb(rocksdb::Status status) {
  if (status.ok()) {
    return Status::OK();
  }
  return Status::Error(status.ToString());
}
static Slice from_rocksdb(rocksdb::Slice slice) {
  return Slice(slice.data(), slice.size());
}
static rocksdb::Slice to_rocksdb(Slice slice) {
  return rocksdb::Slice(slice.data(), slice.size());
}
}  // namespace

Status RocksDb::destroy(Slice path) {
  return from_rocksdb(rocksdb::DestroyDB(path.str(), {}));
}

RocksDb::RocksDb(RocksDb &&) = default;
RocksDb &RocksDb::operator=(RocksDb &&) = default;

RocksDb::~RocksDb() {
  if (!db_) {
    return;
  }
  end_snapshot().ensure();
}

RocksDb RocksDb::clone() const {
  return RocksDb{db_, statistics_};
}

Result<RocksDb> RocksDb::open(std::string path) {
  rocksdb::OptimisticTransactionDB *db;
  auto statistics = rocksdb::CreateDBStatistics();
  {
    rocksdb::Options options;
    options.manual_wal_flush = true;
    options.create_if_missing = true;
    options.max_background_compactions = 4;
    options.max_background_flushes = 2;
    options.bytes_per_sync = 1 << 20;
    options.writable_file_max_buffer_size = 2 << 14;
    options.statistics = statistics;
    TRY_STATUS(from_rocksdb(rocksdb::OptimisticTransactionDB::Open(options, std::move(path), &db)));
  }
  return RocksDb(std::shared_ptr<rocksdb::OptimisticTransactionDB>(db), std::move(statistics));
}

std::unique_ptr<KeyValueReader> RocksDb::snapshot() {
  auto res = std::make_unique<RocksDb>(clone());
  res->begin_snapshot().ensure();
  return std::move(res);
}

std::string RocksDb::stats() const {
  return statistics_->ToString();
}

Result<RocksDb::GetStatus> RocksDb::get(Slice key, std::string &value) {
  //LOG(ERROR) << "GET";
  rocksdb::Status status;
  if (snapshot_) {
    rocksdb::ReadOptions options;
    options.snapshot = snapshot_.get();
    status = db_->Get(options, to_rocksdb(key), &value);
  } else if (transaction_) {
    status = transaction_->Get({}, to_rocksdb(key), &value);
  } else {
    status = db_->Get({}, to_rocksdb(key), &value);
  }
  if (status.ok()) {
    return GetStatus::Ok;
  }
  if (status.code() == rocksdb::Status::kNotFound) {
    return GetStatus::NotFound;
  }
  return from_rocksdb(status);
}

Status RocksDb::set(Slice key, Slice value) {
  if (write_batch_) {
    return from_rocksdb(write_batch_->Put(to_rocksdb(key), to_rocksdb(value)));
  }
  if (transaction_) {
    return from_rocksdb(transaction_->Put(to_rocksdb(key), to_rocksdb(value)));
  }
  return from_rocksdb(db_->Put({}, to_rocksdb(key), to_rocksdb(value)));
}

Status RocksDb::erase(Slice key) {
  if (write_batch_) {
    return from_rocksdb(write_batch_->Delete(to_rocksdb(key)));
  }
  if (transaction_) {
    return from_rocksdb(transaction_->Delete(to_rocksdb(key)));
  }
  return from_rocksdb(db_->Delete({}, to_rocksdb(key)));
}

Result<size_t> RocksDb::count(Slice prefix) {
  rocksdb::ReadOptions options;
  options.snapshot = snapshot_.get();
  std::unique_ptr<rocksdb::Iterator> iterator;
  if (snapshot_ || !transaction_) {
    iterator.reset(db_->NewIterator(options));
  } else {
    iterator.reset(transaction_->GetIterator(options));
  }

  size_t res = 0;
  for (iterator->Seek(to_rocksdb(prefix)); iterator->Valid(); iterator->Next()) {
    if (from_rocksdb(iterator->key()).truncate(prefix.size()) != prefix) {
      break;
    }
    res++;
  }
  if (!iterator->status().ok()) {
    return from_rocksdb(iterator->status());
  }
  return res;
}

Status RocksDb::begin_transaction() {
  write_batch_ = std::make_unique<rocksdb::WriteBatch>();
  //transaction_.reset(db_->BeginTransaction({}, {}));
  return Status::OK();
}

Status RocksDb::commit_transaction() {
  CHECK(write_batch_);
  auto write_batch = std::move(write_batch_);
  rocksdb::WriteOptions options;
  options.sync = true;
  TRY_STATUS(from_rocksdb(db_->Write(options, write_batch.get())));
  return Status::OK();

  //CHECK(transaction_);
  //auto res = from_rocksdb(transaction_->Commit());
  //transaction_.reset();
  //return res;
}

Status RocksDb::abort_transaction() {
  CHECK(write_batch_);
  write_batch_.reset();
  //CHECK(transaction_);
  //transaction_.reset();
  return Status::OK();
}

Status RocksDb::flush() {
  return from_rocksdb(db_->Flush({}));
}

Status RocksDb::begin_snapshot() {
  snapshot_.reset(db_->GetSnapshot());
  return td::Status::OK();
}

Status RocksDb::end_snapshot() {
  if (snapshot_) {
    db_->ReleaseSnapshot(snapshot_.release());
  }
  return td::Status::OK();
}

RocksDb::RocksDb(std::shared_ptr<rocksdb::OptimisticTransactionDB> db, std::shared_ptr<rocksdb::Statistics> statistics)
    : db_(std::move(db)), statistics_(std::move(statistics)) {
}
}  // namespace td

tddb/td/db/RocksDb.h (new file, 81 lines)
@@ -0,0 +1,81 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once

#if !TDDB_USE_ROCKSDB
#error "RocksDb is not supported"
#endif

#include "td/db/KeyValue.h"
#include "td/utils/Status.h"

namespace rocksdb {
class OptimisticTransactionDB;
class Transaction;
class WriteBatch;
class Snapshot;
class Statistics;
}  // namespace rocksdb

namespace td {
class RocksDb : public KeyValue {
 public:
  static Status destroy(Slice path);
  RocksDb clone() const;
  static Result<RocksDb> open(std::string path);

  Result<GetStatus> get(Slice key, std::string &value) override;
  Status set(Slice key, Slice value) override;
  Status erase(Slice key) override;
  Result<size_t> count(Slice prefix) override;

  Status begin_transaction() override;
  Status commit_transaction() override;
  Status abort_transaction() override;
  Status flush() override;

  Status begin_snapshot();
  Status end_snapshot();

  std::unique_ptr<KeyValueReader> snapshot() override;
  std::string stats() const override;

  RocksDb(RocksDb &&);
  RocksDb &operator=(RocksDb &&);
  ~RocksDb();

 private:
  std::shared_ptr<rocksdb::OptimisticTransactionDB> db_;
  std::shared_ptr<rocksdb::Statistics> statistics_;

  std::unique_ptr<rocksdb::Transaction> transaction_;
  std::unique_ptr<rocksdb::WriteBatch> write_batch_;
  class UnreachableDeleter {
   public:
    template <class T>
    void operator()(T *) {
      UNREACHABLE();
    }
  };
  std::unique_ptr<const rocksdb::Snapshot, UnreachableDeleter> snapshot_;

  explicit RocksDb(std::shared_ptr<rocksdb::OptimisticTransactionDB> db,
                   std::shared_ptr<rocksdb::Statistics> statistics);
};
}  // namespace td

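RocksDb implements the same KeyValue interface on top of a rocksdb::OptimisticTransactionDB: begin_transaction() currently buffers writes in a rocksdb::WriteBatch and commit_transaction() applies it with a synced write, while snapshot() clones the handle and pins a RocksDB snapshot for consistent reads. A usage sketch (not part of the commit; it requires building with TDDB_USE_ROCKSDB):

#include "td/db/RocksDb.h"

td::Status rocksdb_demo(std::string path) {
  TRY_RESULT(db, td::RocksDb::open(std::move(path)));

  // Batch two writes into one synced commit.
  TRY_STATUS(db.begin_transaction());
  TRY_STATUS(db.set("key1", "value1"));
  TRY_STATUS(db.set("key2", "value2"));
  TRY_STATUS(db.commit_transaction());

  std::string value;
  TRY_RESULT(status, db.get("key1", value));
  CHECK(status == td::RocksDb::GetStatus::Ok);

  auto reader = db.snapshot();  // consistent read-only view pinned at this point
  TRY_STATUS(db.erase("key1"));
  TRY_RESULT(still_there, reader->get("key1", value));
  CHECK(still_there == td::RocksDb::GetStatus::Ok);  // the erase is not visible to the snapshot
  return td::Status::OK();
}
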
tddb/td/db/binlog/Binlog.cpp (new file, 349 lines)
@@ -0,0 +1,349 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#include "Binlog.h"

#include "BinlogReaderHelper.h"

#include "td/db/utils/StreamInterface.h"
#include "td/db/utils/ChainBuffer.h"
#include "td/db/utils/CyclicBuffer.h"
#include "td/db/utils/FileSyncState.h"
#include "td/db/utils/StreamToFileActor.h"
#include "td/db/utils/FileToStreamActor.h"

#include "td/actor/actor.h"

#include "td/utils/misc.h"
#include "td/utils/port/path.h"
#include "td/utils/VectorQueue.h"

namespace td {
namespace {
class BinlogReplayActor : public actor::Actor {
 public:
  BinlogReplayActor(StreamReader stream_reader, actor::ActorOwn<FileToStreamActor> file_to_stream,
                    std::shared_ptr<BinlogReaderInterface> binlog_reader, Promise<Unit> promise)
      : stream_reader_(std::move(stream_reader))
      , file_to_stream_(std::move(file_to_stream))
      , binlog_reader_(std::move(binlog_reader))
      , promise_(std::move(promise)) {
  }

 private:
  StreamReader stream_reader_;
  actor::ActorOwn<FileToStreamActor> file_to_stream_;
  std::shared_ptr<BinlogReaderInterface> binlog_reader_;
  Promise<Unit> promise_;

  bool is_writer_closed_{false};
  BinlogReaderHelper binlog_reader_helper_;

  unique_ptr<FileToStreamActor::Callback> create_callback() {
    class Callback : public FileToStreamActor::Callback {
     public:
      Callback(actor::ActorShared<> actor) : actor_(std::move(actor)) {
      }
      void got_more() override {
        send_signals_later(actor_, actor::ActorSignals::wakeup());
      }

     private:
      actor::ActorShared<> actor_;
    };
    return make_unique<Callback>(actor_shared(this));
  }

  void start_up() override {
    send_closure_later(file_to_stream_, &FileToStreamActor::set_callback, create_callback());
  }
  void notify_writer() {
    send_signals_later(file_to_stream_, actor::ActorSignals::wakeup());
  }

  void loop() override {
    auto status = do_loop();
    if (status.is_error()) {
      stream_reader_.close_reader(status.clone());
      promise_.set_error(std::move(status));
      return stop();
    }
    if (is_writer_closed_) {
      stream_reader_.close_reader(Status::OK());
      promise_.set_value(Unit());
      return stop();
    }
  }
  Status do_loop() {
    is_writer_closed_ = stream_reader_.is_writer_closed();
    if (is_writer_closed_) {
      TRY_STATUS(std::move(stream_reader_.writer_status()));
    }

    // TODO: watermark want_more/got_more logic
    int64 got_size = stream_reader_.reader_size();
    while (got_size > 0) {
      auto slice = stream_reader_.prepare_read();
      TRY_STATUS(binlog_reader_helper_.parse(*binlog_reader_, slice));
      stream_reader_.confirm_read(slice.size());
      got_size -= slice.size();
    }
    notify_writer();

    if (is_writer_closed_) {
      if (binlog_reader_helper_.unparsed_size() != 0) {
        return Status::Error(PSLICE() << "Got " << binlog_reader_helper_.unparsed_size()
                                      << " unparsed bytes in binlog");
      }
    }

    return Status::OK();
  }
};
}  // namespace
Binlog::Binlog(string path) : path_(std::move(path)) {
}

Status Binlog::replay_sync(BinlogReaderInterface& binlog_reader) {
  TRY_RESULT(fd, FileFd::open(path_, FileFd::Flags::Read));
  // A cyclic buffer is not strictly needed here, but CyclicBuffer matters for the async version
  CyclicBuffer::Options options;
  options.chunk_size = 256;
  options.count = 1;
  auto reader_writer = CyclicBuffer::create(options);

  auto buf_reader = std::move(reader_writer.first);
  auto buf_writer = std::move(reader_writer.second);

  TRY_RESULT(fd_size, fd.get_size());

  BinlogReaderHelper helper;
  while (fd_size != 0) {
    auto read_to = buf_writer.prepare_write();
    if (static_cast<int64>(read_to.size()) > fd_size) {
      read_to.truncate(narrow_cast<size_t>(fd_size));
    }
    TRY_RESULT(read, fd.read(read_to));
    if (read == 0) {
      return Status::Error("Unexpected end of file");
    }
    fd_size -= read;
    buf_writer.confirm_write(read);

    auto data = buf_reader.prepare_read();
    CHECK(data.size() == read);
    TRY_STATUS(helper.parse(binlog_reader, data));
    buf_reader.confirm_read(data.size());
  }

  if (helper.unparsed_size() != 0) {
    return Status::Error(PSLICE() << "Got " << helper.unparsed_size() << " unparsed bytes in binlog");
  }

  //TODO: check crc32
  //TODO: allow binlog truncate
  return Status::OK();
}

void Binlog::replay_async(std::shared_ptr<BinlogReaderInterface> binlog_reader, Promise<Unit> promise) {
  auto r_fd = FileFd::open(path_, FileFd::Flags::Read);
  if (r_fd.is_error()) {
    promise.set_error(r_fd.move_as_error());
    return;
  }
  auto fd = r_fd.move_as_ok();
  CyclicBuffer::Options buf_options;
  buf_options.chunk_size = 256;
  auto reader_writer = CyclicBuffer::create(buf_options);

  auto buf_reader = std::move(reader_writer.first);
  auto buf_writer = std::move(reader_writer.second);

  auto r_fd_size = fd.get_size();
  if (r_fd_size.is_error()) {
    promise.set_error(r_fd_size.move_as_error());
  }
  auto options = FileToStreamActor::Options{};
  options.limit = r_fd_size.move_as_ok();
  auto file_to_stream =
      actor::create_actor<FileToStreamActor>("FileToStream", std::move(fd), std::move(buf_writer), options);
  auto stream_to_binlog = actor::create_actor<BinlogReplayActor>(
      "BinlogReplay", std::move(buf_reader), std::move(file_to_stream), std::move(binlog_reader), std::move(promise));
  stream_to_binlog.release();
}

void Binlog::destroy(CSlice path) {
  td::unlink(path).ignore();
}

void Binlog::destroy() {
  destroy(path_);
}

BinlogWriter::BinlogWriter(std::string path) : path_(std::move(path)) {
}

Status BinlogWriter::open() {
  TRY_RESULT(fd, FileFd::open(path_, FileFd::Flags::Write | FileFd::Flags::Append | FileFd::Create));
  fd_ = std::move(fd);
  ChainBuffer::Options buf_options;
  buf_options.max_io_slices = 128;
  buf_options.chunk_size = 256;
  auto reader_writer = ChainBuffer::create(buf_options);
  buf_reader_ = std::move(reader_writer.first);
  buf_writer_ = std::move(reader_writer.second);
  return Status::OK();
}

Status BinlogWriter::lazy_flush() {
  if (buf_reader_.reader_size() < 512) {
    return Status::OK();
  }
  return flush();
}

Status BinlogWriter::flush() {
  while (buf_reader_.reader_size() != 0) {
    TRY_RESULT(written, fd_.writev(buf_reader_.prepare_readv()));
    buf_reader_.confirm_read(written);
  }
  return Status::OK();
}
Status BinlogWriter::sync() {
  flush();
  return fd_.sync();
}

Status BinlogWriter::close() {
  sync();
  fd_.close();
  return Status::OK();
}

namespace detail {
class FlushHelperActor : public actor::Actor {
 public:
  FlushHelperActor(FileSyncState::Reader sync_state_reader, actor::ActorOwn<StreamToFileActor> actor)
      : sync_state_reader_(std::move(sync_state_reader)), actor_(std::move(actor)) {
  }
  void flush() {
    //TODO;
  }
  void sync(size_t position, Promise<Unit> promise) {
    sync_state_reader_.set_requested_sync_size(position);
    if (promise) {
      queries_.emplace(position, std::move(promise));
    }
    send_signals_later(actor_, actor::ActorSignals::wakeup());
  }

  void close(Promise<> promise) {
    close_promise_ = std::move(promise);
    actor_.reset();
  }

 private:
  FileSyncState::Reader sync_state_reader_;
  actor::ActorOwn<StreamToFileActor> actor_;
  Promise<> close_promise_;

  struct Query {
    Query(size_t position, Promise<Unit> promise) : position(position), promise(std::move(promise)) {
    }
    size_t position;
    Promise<Unit> promise;
  };
  VectorQueue<Query> queries_;

  unique_ptr<StreamToFileActor::Callback> create_callback() {
    class Callback : public StreamToFileActor::Callback {
     public:
      Callback(actor::ActorShared<> actor) : actor_(std::move(actor)) {
      }
      void on_sync_state_changed() override {
        send_signals_later(actor_, actor::ActorSignals::wakeup());
      }

     private:
      actor::ActorShared<> actor_;
    };
    return make_unique<Callback>(actor_shared(this));
  }

  void start_up() override {
    send_closure_later(actor_, &StreamToFileActor::set_callback, create_callback());
  }

  void loop() override {
    auto synced_position = sync_state_reader_.synced_size();
    while (!queries_.empty() && queries_.front().position <= synced_position) {
      queries_.front().promise.set_value(Unit());
      queries_.pop();
    }
  }

  void hangup_shared() override {
    stop();
  }
  void tear_down() override {
    if (close_promise_) {
      close_promise_.set_value(Unit());
    }
  }
};
}  // namespace detail
BinlogWriterAsync::BinlogWriterAsync(std::string path) : path_(std::move(path)) {
}
BinlogWriterAsync::~BinlogWriterAsync() = default;

Status BinlogWriterAsync::open() {
  TRY_RESULT(fd, FileFd::open(path_, FileFd::Flags::Write | FileFd::Flags::Append | FileFd::Create));
  ChainBuffer::Options buf_options;
  buf_options.max_io_slices = 128;
  buf_options.chunk_size = 256;
  auto reader_writer = ChainBuffer::create(buf_options);
  buf_writer_ = std::move(reader_writer.second);

  auto sync_state_reader_writer = td::FileSyncState::create();
  auto writer_actor = actor::create_actor<StreamToFileActor>("StreamToFile", std::move(reader_writer.first),
                                                             std::move(fd), std::move(sync_state_reader_writer.second));
  writer_actor_ = writer_actor.get();
  sync_state_reader_ = std::move(sync_state_reader_writer.first);

  flush_helper_actor_ =
      actor::create_actor<detail::FlushHelperActor>("FlushHelperActor", sync_state_reader_, std::move(writer_actor));

  return Status::OK();
}

void BinlogWriterAsync::close(Promise<> promise) {
  send_closure(std::move(flush_helper_actor_), &detail::FlushHelperActor::close, std::move(promise));
  writer_actor_ = {};
}
void BinlogWriterAsync::lazy_flush() {
  send_signals_later(writer_actor_, actor::ActorSignals::wakeup());
}

void BinlogWriterAsync::flush() {
  send_closure(flush_helper_actor_, &detail::FlushHelperActor::flush);
}
void BinlogWriterAsync::sync(Promise<Unit> promise) {
  send_closure(flush_helper_actor_, &detail::FlushHelperActor::sync, buf_writer_.writer_size(), std::move(promise));
}

}  // namespace td

tddb/td/db/binlog/Binlog.h (new file, 135 lines)
@@ -0,0 +1,135 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once

#include "BinlogReaderInterface.h"

#include "td/db/utils/FileSyncState.h"
#include "td/db/utils/StreamInterface.h"

#include "td/actor/actor.h"

#include "td/utils/misc.h"
#include "td/utils/port/FileFd.h"

namespace td {
class BinlogReaderInterface;
class StreamToFileActor;
namespace detail {
class FlushHelperActor;
}  // namespace detail
class Binlog {
 public:
  explicit Binlog(string path);

  Status replay_sync(BinlogReaderInterface& binlog_reader);
  void replay_async(std::shared_ptr<BinlogReaderInterface> binlog_reader, Promise<Unit> promise);

  static void destroy(CSlice path);
  void destroy();

 private:
  string path_;
};

class BinlogWriter {
 public:
  BinlogWriter(std::string path);

  Status open();

  template <class EventT>
  Status write_event(EventT&& event, BinlogReaderInterface* binlog_reader);
  Status lazy_flush();
  Status flush();
  Status sync();

  Status close();

 private:
  string path_;
  FileFd fd_;

  StreamReader buf_reader_;
  StreamWriter buf_writer_;
};

class BinlogWriterAsync {
 public:
  BinlogWriterAsync(std::string path);
  ~BinlogWriterAsync();

  Status open();

  template <class EventT>
  Status write_event(EventT&& event, BinlogReaderInterface* binlog_reader);

  void close(Promise<> promise);

  void lazy_flush();

  void flush();
  void sync(Promise<Unit> promise = {});

 private:
  std::string path_;
  StreamWriter buf_writer_;
  actor::ActorId<StreamToFileActor> writer_actor_;
  actor::ActorOwn<detail::FlushHelperActor> flush_helper_actor_;

  FileSyncState::Reader sync_state_reader_;
};

template <class EventT>
Status BinlogWriter::write_event(EventT&& event, BinlogReaderInterface* binlog_reader) {
  int64 need_size = -event.serialize({});
  auto dest =
      buf_writer_.prepare_write_at_least(narrow_cast<size_t>(need_size)).truncate(narrow_cast<size_t>(need_size));
  auto written = event.serialize(dest);
  CHECK(written == need_size);

  if (binlog_reader != nullptr) {
    TRY_RESULT(parsed, binlog_reader->parse(dest));
    binlog_reader->flush();
    CHECK(parsed == written);
  }

  buf_writer_.confirm_write(narrow_cast<size_t>(written));
  return lazy_flush();
}

template <class EventT>
Status BinlogWriterAsync::write_event(EventT&& event, BinlogReaderInterface* binlog_reader) {
  int64 need_size = -event.serialize({});
  auto dest =
      buf_writer_.prepare_write_at_least(narrow_cast<size_t>(need_size)).truncate(narrow_cast<size_t>(need_size));
  auto written = event.serialize(dest);
  CHECK(written == need_size);

  if (binlog_reader != nullptr) {
    TRY_RESULT(parsed, binlog_reader->parse(dest));
    CHECK(parsed == written);
  }

  buf_writer_.confirm_write(narrow_cast<size_t>(written));
  lazy_flush();
  return Status::OK();
}

}  // namespace td

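BinlogWriter::write_event() first calls event.serialize({}) to learn the required size (returned as a negative number), serializes into the chain buffer, optionally feeds the bytes straight back into a BinlogReaderInterface so in-memory state stays consistent with the file, and then flushes lazily. A hedged round-trip sketch (not part of the commit): Event is a hypothetical event type, and CountingReader is the minimal reader sketched after BinlogReaderInterface.h below.

#include "td/db/binlog/Binlog.h"

// Sketch only. Event is hypothetical: serialize({}) must return minus the needed
// size, and serialize(dest) must write exactly that many bytes and return the count.
td::Status append_and_replay(td::Slice path, Event event, CountingReader &reader) {
  td::BinlogWriter writer(path.str());
  TRY_STATUS(writer.open());
  TRY_STATUS(writer.write_event(std::move(event), &reader));  // also feeds `reader`
  TRY_STATUS(writer.sync());
  TRY_STATUS(writer.close());

  // Later (e.g. after a restart) the same file can be replayed into a fresh reader.
  CountingReader replayed;
  td::Binlog binlog(path.str());
  TRY_STATUS(binlog.replay_sync(replayed));
  return td::Status::OK();
}
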
tddb/td/db/binlog/BinlogReaderHelper.cpp (new file, 96 lines)
@@ -0,0 +1,96 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#include "BinlogReaderHelper.h"
#include "BinlogReaderInterface.h"

#include "td/utils/misc.h"

namespace td {
td::Status BinlogReaderHelper::parse(BinlogReaderInterface& reader, td::Slice data) {
  SCOPE_EXIT {
    reader.flush();
  };
  while (true) {
    if (expected_prefix_size_ > 0 && expected_prefix_size_ == prefix_size_) {
      TRY_RESULT(size, reader.parse(MutableSlice(buf_.data(), prefix_size_)));
      if (size < 0) {
        if (expected_prefix_size_ > td::narrow_cast<size_t>(-size)) {
          return td::Status::Error("BinlogReader decreased logevent size estimation (1)");
        }
        expected_prefix_size_ = static_cast<size_t>(-size);
      } else {
        if (expected_prefix_size_ != td::narrow_cast<size_t>(size)) {
          return td::Status::Error("BinlogReader changed logevent");
        }
        prefix_size_ = 0;
        expected_prefix_size_ = 0;
      }
    }

    if (data.empty()) {
      break;
    }

    if (expected_prefix_size_ > 0) {
      CHECK(expected_prefix_size_ < buf_.size());
      CHECK(prefix_size_ < expected_prefix_size_);
      auto got = data.copy().truncate(expected_prefix_size_ - prefix_size_);
      reader.flush();
      auto dest = td::MutableSlice(buf_.data(), buf_.size()).substr(prefix_size_);
      if (dest.size() < got.size()) {
        return td::Status::Error("Too big logevent");
      }
      dest.copy_from(got);
      prefix_size_ += got.size();
      data = data.substr(got.size());
      continue;
    }

    CHECK(!data.empty());

    TRY_RESULT(size, reader.parse(data));
    if (size < 0) {
      expected_prefix_size_ = td::narrow_cast<size_t>(-size);
      prefix_size_ = data.size();
      if (expected_prefix_size_ < prefix_size_) {
        return td::Status::Error("BinlogReader waits for less data than it already has");
      }
      if (expected_prefix_size_ > buf_.size()) {
        return td::Status::Error("BinlogReader waits for too big logevent");
      }
      reader.flush();
      td::MutableSlice(buf_.data(), prefix_size_).copy_from(data);
      break;
    }
    if (size == 0) {
      return td::Status::Error("BinlogReader parsed nothing and asked for nothing");
    }
    if (td::narrow_cast<size_t>(size) > data.size()) {
      return td::Status::Error("BinlogReader parsed more than was given");
    }
    data = data.substr(static_cast<size_t>(size));
  }
  return td::Status::OK();
}

size_t BinlogReaderHelper::unparsed_size() const {
  return prefix_size_;
}

}  // namespace td

tddb/td/db/binlog/BinlogReaderHelper.h (new file, 46 lines)
@@ -0,0 +1,46 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once

#include "td/utils/Status.h"
#include "td/utils/Slice.h"

namespace td {

class BinlogReaderInterface;

// Usually data arrives in chunks whose sizes we cannot control,
// so some events will span multiple chunks.
// We assume that all events are small, chunks are big, and only a small
// portion of events will lie on a chunk border.
// This helper stores these rare events locally and feeds each of them
// to BinlogReaderInterface as a single memory chunk.
class BinlogReaderHelper {
 public:
  td::Status parse(BinlogReaderInterface& reader, td::Slice data);

  size_t unparsed_size() const;

 private:
  alignas(16) std::array<char, 1024> buf_;
  size_t prefix_size_{0};
  size_t expected_prefix_size_{0};
};

}  // namespace td

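In other words, the helper owns a small (1 KiB) bounce buffer: whenever the reader reports that an event is incomplete, the helper copies the partial bytes into buf_, completes them from following chunks, and only then hands the event back to the reader as one contiguous slice. A feeding-loop sketch (not part of the commit), assuming `reader` is any BinlogReaderInterface implementation and `chunks` are arbitrary slices of the same byte stream:

#include "td/db/binlog/BinlogReaderHelper.h"
#include "td/db/binlog/BinlogReaderInterface.h"
#include "td/utils/Span.h"

td::Status feed_chunks(td::BinlogReaderInterface &reader, td::Span<td::Slice> chunks) {
  td::BinlogReaderHelper helper;
  for (auto chunk : chunks) {
    // Chunk boundaries may fall in the middle of an event; the helper hides that.
    TRY_STATUS(helper.parse(reader, chunk));
  }
  if (helper.unparsed_size() != 0) {
    return td::Status::Error("binlog ends with a truncated event");
  }
  return td::Status::OK();
}
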
tddb/td/db/binlog/BinlogReaderInterface.h (new file, 42 lines)
@@ -0,0 +1,42 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#pragma once

#include "td/utils/Slice.h"
#include "td/utils/Status.h"

namespace td {

class BinlogReaderInterface {
 public:
  virtual ~BinlogReaderInterface() {
  }
  // Returns an error or a size.
  // A negative size means the reader expects data.size() to be at least -size.
  // A positive size means the first `size` bytes of data have been processed and can be skipped.
  virtual td::Result<td::int64> parse(td::Slice data) = 0;

  // Called when all previously passed slices are invalidated.
  // Until it is called, the reader may reuse all slices given to it,
  // which makes it possible to calculate crc32c in larger chunks.
  // TODO: maybe we should just process all data that we can at once
  virtual void flush() {
  }
};
}  // namespace td

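A minimal reader that honours this contract for a hypothetical record format (a 4-byte little-endian length prefix followed by the payload): it asks for more data by returning a negative total size and consumes one whole record otherwise. This is an illustration only, not a reader from the commit:

#include "td/db/binlog/BinlogReaderInterface.h"

#include <cstring>

class CountingReader : public td::BinlogReaderInterface {
 public:
  td::Result<td::int64> parse(td::Slice data) override {
    if (data.size() < 4) {
      return -4;  // need at least the length prefix
    }
    td::uint32 payload_size;
    std::memcpy(&payload_size, data.data(), 4);
    td::int64 total = 4 + static_cast<td::int64>(payload_size);
    if (static_cast<td::int64>(data.size()) < total) {
      return -total;  // need the whole record before it can be processed
    }
    records_++;  // "apply" the event; a real reader would decode data.substr(4, payload_size)
    return total;
  }
  size_t records() const {
    return records_;
  }

 private:
  size_t records_{0};
};
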
tddb/td/db/utils/ChainBuffer.cpp (new file, 139 lines)
@@ -0,0 +1,139 @@
/*
    This file is part of TON Blockchain Library (GNU LGPL v2 or later; same license header as in KeyValue.h above).
    Copyright 2017-2019 Telegram Systems LLP
*/
#include "ChainBuffer.h"

#include "td/utils/buffer.h"
#include "td/db/utils/StreamInterface.h"

namespace td {
namespace detail {
class ChainBuffer : public StreamWriterInterface, public StreamReaderInterface {
 public:
  using Options = ::td::ChainBuffer::Options;

  ChainBuffer(Options options) {
    shared_.options_ = options;
    reader_.io_slices_.reserve(options.max_io_slices);
    reader_.buf_ = writer_.buf_.extract_reader();
  }

  // StreamReaderInterface
  size_t reader_size() override {
    reader_.buf_.sync_with_writer();
    return reader_.buf_.size();
  }

  Slice prepare_read() override {
    return reader_.buf_.prepare_read();
  }
  Span<IoSlice> prepare_readv() override {
    reader_.io_slices_.clear();
    auto it = reader_.buf_.clone();
    while (!it.empty() && reader_.io_slices_.size() < reader_.io_slices_.capacity()) {
      auto slice = it.prepare_read();
      reader_.io_slices_.push_back(as_io_slice(slice));
      it.confirm_read(slice.size());
    }
    return reader_.io_slices_;
  }
  void confirm_read(size_t size) override {
    reader_.buf_.advance(size);
  }

  void close_reader(Status error) override {
    CHECK(!reader_.is_closed_);
    reader_.status_ = std::move(error);
    reader_.is_closed_.store(true, std::memory_order_release);
  }
  bool is_writer_closed() const override {
    return writer_.is_closed_.load(std::memory_order_acquire);
  }
  Status &writer_status() override {
    CHECK(is_writer_closed());
    return writer_.status_;
  }

  // StreamWriterInterface
  size_t writer_size() override {
    return writer_.size_;
  }
  MutableSlice prepare_write() override {
    return writer_.buf_.prepare_append(shared_.options_.chunk_size);
  }
  MutableSlice prepare_write_at_least(size_t size) override {
    return writer_.buf_.prepare_append_at_least(size);
  }
  void confirm_write(size_t size) override {
    writer_.buf_.confirm_append(size);
    writer_.size_ += size;
  }
  void append(Slice data) override {
    writer_.buf_.append(data, shared_.options_.chunk_size);
    writer_.size_ += data.size();
  }
  void append(BufferSlice data) override {
    writer_.size_ += data.size();
    writer_.buf_.append(std::move(data));
  }
  void append(std::string data) override {
    append(Slice(data));
  }
  void close_writer(Status error) override {
    CHECK(!writer_.is_closed_);
    writer_.status_ = std::move(error);
    writer_.is_closed_.store(true, std::memory_order_release);
  }
  bool is_reader_closed() const override {
    return reader_.is_closed_.load(std::memory_order_acquire);
  }
  Status &reader_status() override {
    CHECK(is_reader_closed());
    return reader_.status_;
  }

 private:
  struct SharedData {
    Options options_;
  } shared_;

  char pad1[128];

  struct ReaderData {
    ChainBufferReader buf_;
    std::atomic<bool> is_closed_{false};
    Status status_;
    std::vector<IoSlice> io_slices_;
  } reader_;

  char pad2[128];

  struct WriterData {
    ChainBufferWriter buf_;
    std::atomic<bool> is_closed_{false};
    Status status_;
    size_t size_{0};
  } writer_;
};
}  // namespace detail

std::pair<ChainBuffer::Reader, ChainBuffer::Writer> ChainBuffer::create(Options options) {
  auto impl = std::make_shared<detail::ChainBuffer>(options);
  return {Reader(impl), Writer(impl)};
}
}  // namespace td

37
tddb/td/db/utils/ChainBuffer.h
Normal file
37
tddb/td/db/utils/ChainBuffer.h
Normal file
|
@ -0,0 +1,37 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#pragma once
|
||||
#include "td/utils/common.h"
|
||||
|
||||
#include "StreamInterface.h"
|
||||
|
||||
namespace td {
|
||||
class ChainBuffer {
|
||||
public:
|
||||
struct Options {
|
||||
Options() {
|
||||
}
|
||||
size_t chunk_size{1024 * 1024 / 8}; // default size of one chunk in chain buffer
|
||||
size_t max_io_slices{128}; // size of buffer for writev
|
||||
};
|
||||
using Reader = StreamReader;
|
||||
using Writer = StreamWriter;
|
||||
static std::pair<Reader, Writer> create(Options options = {});
|
||||
};
|
||||
} // namespace td
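// Illustrative usage sketch (not part of the original commit): one side appends
// data through the Writer half of a ChainBuffer while the other side drains the
// Reader half, mirroring TEST(Buffers, ChainBuffer) in tddb/test/binlog.cpp.
#include "td/db/utils/ChainBuffer.h"

#include "td/utils/Slice.h"
#include "td/utils/Status.h"

#include <string>
#include <utility>

std::string chain_buffer_roundtrip(td::Slice payload) {
  auto reader_writer = td::ChainBuffer::create();
  auto reader = std::move(reader_writer.first);
  auto writer = std::move(reader_writer.second);

  writer.append(payload);                 // copies payload into the chain
  writer.close_writer(td::Status::OK());  // signal that no more data will arrive

  std::string res;
  while (true) {
    bool is_closed = reader.is_writer_closed();  // check before draining
    td::Slice slice;
    if (reader.reader_size() != 0) {
      slice = reader.prepare_read();             // next contiguous readable chunk
      res += slice.str();
      reader.confirm_read(slice.size());
    }
    if (slice.empty() && is_closed) {
      reader.writer_status().ensure();           // propagate any writer-side error
      break;
    }
  }
  return res;
}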
|
155
tddb/td/db/utils/CyclicBuffer.cpp
Normal file
|
@ -0,0 +1,155 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#include "CyclicBuffer.h"
|
||||
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/Slice.h"
|
||||
|
||||
#include <atomic>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
|
||||
namespace td {
|
||||
namespace detail {
|
||||
class CyclicBuffer : public StreamWriterInterface, public StreamReaderInterface {
|
||||
public:
|
||||
using Options = ::td::CyclicBuffer::Options;
|
||||
|
||||
CyclicBuffer(Options options) {
|
||||
CHECK(options.chunk_size != 0);
|
||||
CHECK(options.count != 0);
|
||||
CHECK(options.alignment != 0);
|
||||
CHECK(options.chunk_size < (std::numeric_limits<size_t>::max() - options.alignment) / options.count);
|
||||
shared_.options_ = options;
|
||||
shared_.raw_data_ = std::make_unique<char[]>(options.size() + options.alignment - 1);
|
||||
auto pos = reinterpret_cast<uint64>(shared_.raw_data_.get());
|
||||
auto offset = (options.alignment - static_cast<size_t>(pos % options.alignment)) % options.alignment;
|
||||
CHECK(offset < options.alignment);
|
||||
shared_.data_ = MutableSlice(shared_.raw_data_.get() + offset, options.size());
|
||||
}
|
||||
|
||||
// StreamReaderInterface
|
||||
size_t reader_size() override {
|
||||
auto offset = reader_.pos_.load(std::memory_order_relaxed);
|
||||
auto size = writer_.pos_.load(std::memory_order_acquire) - offset;
|
||||
return narrow_cast<size_t>(size);
|
||||
}
|
||||
Slice prepare_read() override {
|
||||
auto offset = reader_.pos_.load(std::memory_order_relaxed);
|
||||
auto size = narrow_cast<size_t>(writer_.pos_.load(std::memory_order_acquire) - offset);
|
||||
if (size == 0) {
|
||||
return {};
|
||||
}
|
||||
offset %= (shared_.options_.chunk_size * shared_.options_.count);
|
||||
return shared_.data_.substr(narrow_cast<size_t>(offset)).truncate(size).truncate(shared_.options_.chunk_size);
|
||||
}
|
||||
Span<IoSlice> prepare_readv() override {
|
||||
reader_.io_slice_ = as_io_slice(prepare_read());
|
||||
return Span<IoSlice>(&reader_.io_slice_, 1);
|
||||
}
|
||||
void confirm_read(size_t size) override {
|
||||
reader_.pos_.store(reader_.pos_.load(std::memory_order_relaxed) + size);
|
||||
}
|
||||
|
||||
void close_reader(Status error) override {
|
||||
CHECK(!reader_.is_closed_);
|
||||
reader_.status_ = std::move(error);
|
||||
reader_.is_closed_.store(true, std::memory_order_release);
|
||||
}
|
||||
bool is_writer_closed() const override {
|
||||
return writer_.is_closed_.load(std::memory_order_acquire);
|
||||
}
|
||||
Status &writer_status() override {
|
||||
CHECK(is_writer_closed());
|
||||
return writer_.status_;
|
||||
}
|
||||
|
||||
// StreamWriterInterface
|
||||
size_t writer_size() override {
|
||||
auto offset = reader_.pos_.load(std::memory_order_acquire);
|
||||
auto size = writer_.pos_.load(std::memory_order_relaxed) - offset;
|
||||
return narrow_cast<size_t>(size);
|
||||
}
|
||||
MutableSlice prepare_write() override {
|
||||
auto max_offset =
|
||||
reader_.pos_.load(std::memory_order_acquire) + shared_.options_.chunk_size * (shared_.options_.count - 1);
|
||||
auto offset = writer_.pos_.load(std::memory_order_relaxed);
|
||||
if (offset > max_offset) {
|
||||
return {};
|
||||
}
|
||||
offset %= (shared_.options_.chunk_size * shared_.options_.count);
|
||||
return shared_.data_.substr(narrow_cast<size_t>(offset), shared_.options_.chunk_size);
|
||||
}
|
||||
MutableSlice prepare_write_at_least(size_t size) override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
void confirm_write(size_t size) override {
|
||||
writer_.pos_.store(writer_.pos_.load(std::memory_order_relaxed) + size);
|
||||
}
|
||||
void append(Slice data) override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
void append(BufferSlice data) override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
void append(std::string data) override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
void close_writer(Status error) override {
|
||||
CHECK(!writer_.is_closed_);
|
||||
writer_.status_ = std::move(error);
|
||||
writer_.is_closed_.store(true, std::memory_order_release);
|
||||
}
|
||||
bool is_reader_closed() const override {
|
||||
return reader_.is_closed_.load(std::memory_order_acquire);
|
||||
}
|
||||
Status &reader_status() override {
|
||||
CHECK(is_reader_closed());
|
||||
return reader_.status_;
|
||||
}
|
||||
|
||||
private:
|
||||
struct SharedData {
|
||||
std::unique_ptr<char[]> raw_data_;
|
||||
MutableSlice data_;
|
||||
Options options_;
|
||||
} shared_;
|
||||
|
||||
struct ReaderData {
|
||||
std::atomic<uint64> pos_{0};
|
||||
std::atomic<bool> is_closed_{false};
|
||||
Status status_;
|
||||
IoSlice io_slice_;
|
||||
} reader_;
|
||||
|
||||
char pad[128];
|
||||
|
||||
struct WriterData {
|
||||
std::atomic<uint64> pos_{0};
|
||||
std::atomic<bool> is_closed_{false};
|
||||
Status status_;
|
||||
} writer_;
|
||||
};
|
||||
} // namespace detail
|
||||
|
||||
std::pair<CyclicBuffer::Reader, CyclicBuffer::Writer> CyclicBuffer::create(Options options) {
|
||||
auto impl = std::make_shared<detail::CyclicBuffer>(options);
|
||||
return {Reader(impl), Writer(impl)};
|
||||
}
|
||||
} // namespace td
|
48
tddb/td/db/utils/CyclicBuffer.h
Normal file
|
@ -0,0 +1,48 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include "StreamInterface.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
namespace td {
|
||||
|
||||
class CyclicBuffer {
|
||||
public:
|
||||
struct Options {
|
||||
Options() {
|
||||
}
|
||||
size_t chunk_size{1024 * 1024 / 8};
|
||||
size_t count{16};
|
||||
size_t alignment{1024};
|
||||
|
||||
size_t size() const {
|
||||
return chunk_size * count;
|
||||
}
|
||||
size_t max_writable_size() {
|
||||
return size() - chunk_size;
|
||||
}
|
||||
};
|
||||
using Reader = StreamReader;
|
||||
using Writer = StreamWriter;
|
||||
static std::pair<Reader, Writer> create(Options options = {});
|
||||
};
|
||||
|
||||
} // namespace td
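// Illustrative usage sketch (not part of the original commit): CyclicBuffer hands
// out fixed-size, aligned chunks. The writer obtains a chunk with prepare_write()
// (an empty slice when the ring is full), fills it and calls confirm_write(); the
// reader mirrors this with prepare_read()/confirm_read(). This follows
// TEST(Buffers, CyclicBufferSimple) in tddb/test/binlog.cpp.
#include "td/db/utils/CyclicBuffer.h"

#include "td/utils/Slice.h"
#include "td/utils/Status.h"

#include <string>
#include <utility>

std::string cyclic_buffer_roundtrip(td::Slice data) {
  auto reader_writer = td::CyclicBuffer::create();
  auto reader = std::move(reader_writer.first);
  auto writer = std::move(reader_writer.second);

  std::string res;
  bool writer_closed = false;
  while (true) {
    // Drain at most one chunk.
    bool is_closed = reader.is_writer_closed();
    auto slice = reader.prepare_read();
    res += slice.str();
    reader.confirm_read(slice.size());
    if (is_closed && slice.empty()) {
      reader.writer_status().ensure();
      break;
    }

    // Fill at most one chunk.
    if (!writer_closed) {
      auto dest = writer.prepare_write();        // empty slice if the ring is full
      auto to_copy = data.substr(0, dest.size());
      if (data.empty()) {
        writer.close_writer(td::Status::OK());
        writer_closed = true;
      } else if (!to_copy.empty()) {
        dest.copy_from(to_copy);
        data = data.substr(to_copy.size());
        writer.confirm_write(to_copy.size());
      }
    }
  }
  return res;
}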
|
67
tddb/td/db/utils/FileSyncState.cpp
Normal file
|
@ -0,0 +1,67 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
|
||||
#include "FileSyncState.h"
|
||||
namespace td {
|
||||
std::pair<FileSyncState::Reader, FileSyncState::Writer> FileSyncState::create() {
|
||||
auto self = std::make_shared<Self>();
|
||||
return {Reader(self), Writer(self)};
|
||||
}
|
||||
|
||||
FileSyncState::Reader::Reader(std::shared_ptr<Self> self) : self(std::move(self)) {
|
||||
}
|
||||
bool FileSyncState::Reader::set_requested_sync_size(size_t size) const {
|
||||
if (self->requested_synced_size.load(std::memory_order_relaxed) == size) {
|
||||
return false;
|
||||
}
|
||||
self->requested_synced_size.store(size, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
size_t FileSyncState::Reader::synced_size() const {
|
||||
return self->synced_size;
|
||||
}
|
||||
size_t FileSyncState::Reader::flushed_size() const {
|
||||
return self->flushed_size;
|
||||
}
|
||||
|
||||
FileSyncState::Writer::Writer(std::shared_ptr<Self> self) : self(std::move(self)) {
|
||||
}
|
||||
|
||||
size_t FileSyncState::Writer::get_requested_synced_size() {
|
||||
return self->requested_synced_size.load(std::memory_order_acquire);
|
||||
}
|
||||
|
||||
bool FileSyncState::Writer::set_synced_size(size_t size) {
|
||||
if (self->synced_size.load(std::memory_order_relaxed) == size) {
|
||||
return false;
|
||||
}
|
||||
self->synced_size.store(size, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool FileSyncState::Writer::set_flushed_size(size_t size) {
|
||||
if (self->flushed_size.load(std::memory_order_relaxed) == size) {
|
||||
return false;
|
||||
}
|
||||
self->flushed_size.store(size, std::memory_order_release);
|
||||
return true;
|
||||
}
|
||||
|
||||
} // namespace td
|
65
tddb/td/db/utils/FileSyncState.h
Normal file
|
@ -0,0 +1,65 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#pragma once
|
||||
#include <utility>
|
||||
#include <memory>
|
||||
#include <atomic>
|
||||
|
||||
#include "td/utils/common.h"
|
||||
|
||||
namespace td {
|
||||
class FileSyncState {
|
||||
struct Self;
|
||||
|
||||
public:
|
||||
class Reader {
|
||||
public:
|
||||
Reader() = default;
|
||||
Reader(std::shared_ptr<Self> self);
|
||||
bool set_requested_sync_size(size_t size) const;
|
||||
size_t synced_size() const;
|
||||
size_t flushed_size() const;
|
||||
|
||||
private:
|
||||
std::shared_ptr<Self> self;
|
||||
};
|
||||
|
||||
class Writer {
|
||||
public:
|
||||
Writer() = default;
|
||||
Writer(std::shared_ptr<Self> self);
|
||||
size_t get_requested_synced_size();
|
||||
bool set_synced_size(size_t size);
|
||||
bool set_flushed_size(size_t size);
|
||||
|
||||
private:
|
||||
std::shared_ptr<Self> self;
|
||||
};
|
||||
|
||||
static std::pair<Reader, Writer> create();
|
||||
|
||||
private:
|
||||
struct Self {
|
||||
std::atomic<size_t> requested_synced_size{0};
|
||||
|
||||
std::atomic<size_t> synced_size{0};
|
||||
std::atomic<size_t> flushed_size{0};
|
||||
};
|
||||
};
|
||||
} // namespace td
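// Illustrative sketch (not part of the original commit): FileSyncState is shared
// state between a client (the Reader half) and a file-writing actor such as
// StreamToFileActor (the Writer half). The client requests that data up to some
// offset be fsync'ed; the actor reports how much has been flushed and synced.
#include "td/db/utils/FileSyncState.h"

#include <utility>

void file_sync_state_example() {
  auto state = td::FileSyncState::create();
  auto client_side = std::move(state.first);   // typically kept by the binlog owner
  auto actor_side = std::move(state.second);   // typically passed to StreamToFileActor

  // Client: ask for the first 4096 bytes to be synced to disk.
  client_side.set_requested_sync_size(4096);   // returns true if the request changed

  // Actor: observe the request and report progress after writev() and sync().
  size_t requested = actor_side.get_requested_synced_size();  // 4096 here
  actor_side.set_flushed_size(requested);
  actor_side.set_synced_size(requested);

  // Client: poll the progress (both return 4096 at this point).
  size_t flushed = client_side.flushed_size();
  size_t synced = client_side.synced_size();
  (void)flushed;
  (void)synced;
}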
|
78
tddb/td/db/utils/FileToStreamActor.cpp
Normal file
|
@ -0,0 +1,78 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#include "FileToStreamActor.h"
|
||||
|
||||
namespace td {
|
||||
FileToStreamActor::FileToStreamActor(FileFd fd, StreamWriter writer, Options options)
|
||||
: fd_(std::move(fd)), writer_(std::move(writer)), options_(options) {
|
||||
}
|
||||
|
||||
void FileToStreamActor::set_callback(td::unique_ptr<Callback> callback) {
|
||||
callback_ = std::move(callback);
|
||||
got_more();
|
||||
}
|
||||
|
||||
void FileToStreamActor::got_more() {
|
||||
if (!callback_) {
|
||||
return;
|
||||
}
|
||||
callback_->got_more();
|
||||
}
|
||||
void FileToStreamActor::loop() {
|
||||
auto dest = writer_.prepare_write();
|
||||
if (options_.limit != -1) {
|
||||
if (static_cast<int64>(dest.size()) > options_.limit) {
|
||||
dest.truncate(narrow_cast<size_t>(options_.limit));
|
||||
}
|
||||
}
|
||||
if (dest.empty()) {
|
||||
//NB: The owner of the CyclicBuffer::Reader should notify this actor after each chunk is read
|
||||
return;
|
||||
}
|
||||
|
||||
auto r_size = fd_.read(dest);
|
||||
if (r_size.is_error()) {
|
||||
writer_.close_writer(r_size.move_as_error());
|
||||
got_more();
|
||||
return stop();
|
||||
}
|
||||
auto size = r_size.move_as_ok();
|
||||
writer_.confirm_write(size);
|
||||
got_more();
|
||||
if (options_.limit != -1) {
|
||||
options_.limit -= narrow_cast<int64>(size);
|
||||
}
|
||||
if (options_.limit == 0) {
|
||||
writer_.close_writer(td::Status::OK());
|
||||
got_more();
|
||||
return stop();
|
||||
}
|
||||
if (size == 0) {
|
||||
if (options_.read_tail_each < 0) {
|
||||
writer_.close_writer(td::Status::OK());
|
||||
got_more();
|
||||
return stop();
|
||||
}
|
||||
alarm_timestamp() = Timestamp::in(options_.read_tail_each);
|
||||
return;
|
||||
}
|
||||
yield();
|
||||
}
|
||||
|
||||
} // namespace td
|
53
tddb/td/db/utils/FileToStreamActor.h
Normal file
|
@ -0,0 +1,53 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#pragma once
|
||||
|
||||
#include "StreamInterface.h"
|
||||
|
||||
#include "td/actor/actor.h"
|
||||
#include "td/utils/port/FileFd.h"
|
||||
|
||||
namespace td {
|
||||
class FileToStreamActor : public td::actor::Actor {
|
||||
public:
|
||||
struct Options {
|
||||
Options() {
|
||||
}
|
||||
int64 limit{-1};
|
||||
double read_tail_each{-1};
|
||||
};
|
||||
class Callback {
|
||||
public:
|
||||
virtual ~Callback() {
|
||||
}
|
||||
virtual void got_more() = 0;
|
||||
};
|
||||
FileToStreamActor(FileFd fd, StreamWriter writer, Options options = {});
|
||||
|
||||
void set_callback(td::unique_ptr<Callback> callback);
|
||||
|
||||
private:
|
||||
void got_more();
|
||||
void loop() override;
|
||||
FileFd fd_;
|
||||
StreamWriter writer_;
|
||||
td::unique_ptr<Callback> callback_;
|
||||
Options options_;
|
||||
};
|
||||
} // namespace td
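// Illustrative wiring sketch (not part of the original commit): FileToStreamActor
// pumps bytes from a FileFd into the Writer half of a stream (here a CyclicBuffer)
// and pokes its Callback whenever more data, an error, or EOF becomes available on
// the Reader half. The actor name, the Notify stub and the FileFd::open flags are
// assumptions made for this example; it must run inside a td::actor scheduler context.
#include "td/db/utils/CyclicBuffer.h"
#include "td/db/utils/FileToStreamActor.h"

#include "td/actor/actor.h"
#include "td/utils/common.h"
#include "td/utils/port/FileFd.h"

#include <utility>

td::actor::ActorOwn<td::FileToStreamActor> spawn_file_reader(td::CSlice path,
                                                             td::CyclicBuffer::Reader* out_reader) {
  auto reader_writer = td::CyclicBuffer::create();
  *out_reader = std::move(reader_writer.first);

  auto fd = td::FileFd::open(path, td::FileFd::Read).move_as_ok();  // assumed flag name

  auto actor = td::actor::create_actor<td::FileToStreamActor>("file2stream", std::move(fd),
                                                              std::move(reader_writer.second));

  // Hypothetical callback: the owner of the Reader half would wake its consumer here,
  // and (per the note in FileToStreamActor.cpp) notify the actor again after each
  // chunk has been read from the CyclicBuffer.
  class Notify : public td::FileToStreamActor::Callback {
   public:
    void got_more() override {
      // e.g. send a message to whoever drains *out_reader
    }
  };
  td::actor::send_closure(actor, &td::FileToStreamActor::set_callback, td::make_unique<Notify>());
  return actor;
}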
|
79
tddb/td/db/utils/StreamInterface.cpp
Normal file
|
@ -0,0 +1,79 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#include "StreamInterface.h"
|
||||
namespace td {
|
||||
StreamReader::StreamReader(std::shared_ptr<StreamReaderInterface> self) : self(std::move(self)) {
|
||||
}
|
||||
size_t StreamReader::reader_size() {
|
||||
return self->reader_size();
|
||||
}
|
||||
Slice StreamReader::prepare_read() {
|
||||
return self->prepare_read();
|
||||
}
|
||||
Span<IoSlice> StreamReader::prepare_readv() {
|
||||
return self->prepare_readv();
|
||||
}
|
||||
void StreamReader::confirm_read(size_t size) {
|
||||
return self->confirm_read(size);
|
||||
}
|
||||
|
||||
void StreamReader::close_reader(Status error) {
|
||||
return self->close_reader(std::move(error));
|
||||
}
|
||||
bool StreamReader::is_writer_closed() const {
|
||||
return self->is_writer_closed();
|
||||
}
|
||||
Status &StreamReader::writer_status() {
|
||||
return self->writer_status();
|
||||
}
|
||||
|
||||
StreamWriter::StreamWriter(std::shared_ptr<StreamWriterInterface> self) : self(std::move(self)) {
|
||||
}
|
||||
size_t StreamWriter::writer_size() {
|
||||
return self->writer_size();
|
||||
}
|
||||
MutableSlice StreamWriter::prepare_write() {
|
||||
return self->prepare_write();
|
||||
}
|
||||
MutableSlice StreamWriter::prepare_write_at_least(size_t size) {
|
||||
return self->prepare_write_at_least(size);
|
||||
}
|
||||
void StreamWriter::confirm_write(size_t size) {
|
||||
return self->confirm_write(size);
|
||||
}
|
||||
void StreamWriter::append(Slice data) {
|
||||
return self->append(data);
|
||||
}
|
||||
void StreamWriter::append(BufferSlice data) {
|
||||
return self->append(std::move(data));
|
||||
}
|
||||
void StreamWriter::append(std::string data) {
|
||||
return self->append(std::move(data));
|
||||
}
|
||||
|
||||
void StreamWriter::close_writer(Status error) {
|
||||
return self->close_writer(std::move(error));
|
||||
}
|
||||
bool StreamWriter::is_reader_closed() const {
|
||||
return self->is_reader_closed();
|
||||
}
|
||||
Status &StreamWriter::reader_status() {
|
||||
return self->reader_status();
|
||||
}
|
||||
} // namespace td
|
102
tddb/td/db/utils/StreamInterface.h
Normal file
|
@ -0,0 +1,102 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#pragma once
|
||||
#include "td/utils/buffer.h"
|
||||
#include "td/utils/Slice.h"
|
||||
#include "td/utils/Span.h"
|
||||
#include "td/utils/port/IoSlice.h"
|
||||
|
||||
namespace td {
|
||||
// Generic stream interface
|
||||
// Used to hide implementation details.
|
||||
// CyclicBuffer, ChainBuffer, Bounded ChainBuffer, some clever writers. They all should be interchangeable.
|
||||
// Most implementations will assume that reading and writing may happen concurrently.
|
||||
|
||||
class StreamReaderInterface {
|
||||
public:
|
||||
virtual ~StreamReaderInterface() {
|
||||
}
|
||||
virtual size_t reader_size() = 0;
|
||||
virtual Slice prepare_read() = 0;
|
||||
virtual Span<IoSlice> prepare_readv() = 0;
|
||||
virtual void confirm_read(size_t size) = 0;
|
||||
|
||||
virtual void close_reader(Status error) = 0;
|
||||
virtual bool is_writer_closed() const = 0;
|
||||
virtual Status &writer_status() = 0;
|
||||
};
|
||||
|
||||
class StreamWriterInterface {
|
||||
public:
|
||||
virtual ~StreamWriterInterface() {
|
||||
}
|
||||
virtual size_t writer_size() = 0;
|
||||
virtual MutableSlice prepare_write() = 0;
|
||||
virtual MutableSlice prepare_write_at_least(size_t size) = 0;
|
||||
virtual void confirm_write(size_t size) = 0;
|
||||
virtual void append(Slice data) = 0;
|
||||
virtual void append(BufferSlice data) {
|
||||
append(data.as_slice());
|
||||
}
|
||||
virtual void append(std::string data) {
|
||||
append(Slice(data));
|
||||
}
|
||||
|
||||
virtual void close_writer(Status error) = 0;
|
||||
virtual bool is_reader_closed() const = 0;
|
||||
virtual Status &reader_status() = 0;
|
||||
};
|
||||
|
||||
// Hide shared_ptr
|
||||
class StreamReader : public StreamReaderInterface {
|
||||
public:
|
||||
StreamReader() = default;
|
||||
StreamReader(std::shared_ptr<StreamReaderInterface> self);
|
||||
size_t reader_size() override;
|
||||
Slice prepare_read() override;
|
||||
Span<IoSlice> prepare_readv() override;
|
||||
void confirm_read(size_t size) override;
|
||||
void close_reader(Status error) override;
|
||||
bool is_writer_closed() const override;
|
||||
Status &writer_status() override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<StreamReaderInterface> self;
|
||||
};
|
||||
|
||||
class StreamWriter : public StreamWriterInterface {
|
||||
public:
|
||||
StreamWriter() = default;
|
||||
StreamWriter(std::shared_ptr<StreamWriterInterface> self);
|
||||
size_t writer_size() override;
|
||||
MutableSlice prepare_write() override;
|
||||
MutableSlice prepare_write_at_least(size_t size) override;
|
||||
void confirm_write(size_t size) override;
|
||||
void append(Slice data) override;
|
||||
void append(BufferSlice data) override;
|
||||
void append(std::string data) override;
|
||||
void close_writer(Status error) override;
|
||||
bool is_reader_closed() const override;
|
||||
Status &reader_status() override;
|
||||
|
||||
private:
|
||||
std::shared_ptr<StreamWriterInterface> self;
|
||||
};
|
||||
|
||||
} // namespace td
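// Illustrative sketch (not part of the original commit): because StreamReader only
// forwards to a StreamReaderInterface, the same drain loop works regardless of
// whether the stream is backed by a CyclicBuffer, a ChainBuffer or any future
// implementation. Returns true once the writer has closed the stream and all data
// has been drained; the final writer status is copied into `status`.
#include "td/db/utils/StreamInterface.h"

#include "td/utils/Status.h"

#include <string>

bool drain_stream(td::StreamReader& reader, std::string& out, td::Status& status) {
  bool is_closed = reader.is_writer_closed();  // check before draining to avoid losing data
  while (reader.reader_size() != 0) {
    auto slice = reader.prepare_read();
    out.append(slice.data(), slice.size());
    reader.confirm_read(slice.size());
  }
  if (is_closed) {
    status = reader.writer_status().clone();
    return true;
  }
  return false;  // call again once the writer has produced more data
}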
|
112
tddb/td/db/utils/StreamToFileActor.cpp
Normal file
|
@ -0,0 +1,112 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#include "StreamToFileActor.h"
|
||||
|
||||
namespace td {
|
||||
StreamToFileActor::StreamToFileActor(StreamReader reader, FileFd fd, FileSyncState::Writer sync_state, Options options)
|
||||
: reader_(std::move(reader)), fd_(std::move(fd)), options_(options), sync_state_(std::move(sync_state)) {
|
||||
}
|
||||
void StreamToFileActor::set_callback(td::unique_ptr<Callback> callback) {
|
||||
callback_ = std::move(callback);
|
||||
callback_->on_sync_state_changed();
|
||||
}
|
||||
|
||||
Result<bool> StreamToFileActor::is_closed() {
|
||||
if (!reader_.is_writer_closed()) {
|
||||
return false;
|
||||
}
|
||||
return reader_.writer_status().clone();
|
||||
}
|
||||
|
||||
Status StreamToFileActor::do_flush_once() {
|
||||
auto size = reader_.reader_size();
|
||||
size_t total_written = 0;
|
||||
while (total_written < size) {
|
||||
auto io_slices = reader_.prepare_readv();
|
||||
TRY_RESULT(written, fd_.writev(io_slices));
|
||||
reader_.confirm_read(written);
|
||||
flushed_size_ += written;
|
||||
total_written += written;
|
||||
}
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
Status StreamToFileActor::do_sync() {
|
||||
if (flushed_size_ == synced_size_) {
|
||||
return Status::OK();
|
||||
}
|
||||
TRY_STATUS(fd_.sync());
|
||||
synced_size_ = flushed_size_;
|
||||
return Status::OK();
|
||||
}
|
||||
|
||||
void StreamToFileActor::schedule_sync() {
|
||||
if (synced_size_ == flushed_size_) {
|
||||
return;
|
||||
}
|
||||
if (sync_state_.get_requested_synced_size() > synced_size_) {
|
||||
sync_at_.relax(Timestamp::in(options_.immediate_sync_delay));
|
||||
} else {
|
||||
sync_at_.relax(Timestamp::in(options_.lazy_sync_delay));
|
||||
}
|
||||
}
|
||||
|
||||
Result<bool> StreamToFileActor::do_loop() {
|
||||
// We must first check whether the writer is closed and only then drain all data from the reader.
|
||||
// Otherwise there would be a race and some of the data could be lost.
|
||||
// It is also useful to check the error and stop immediately.
|
||||
TRY_RESULT(is_closed, is_closed());
|
||||
|
||||
// Flush all data that is available at the beginning of the loop
|
||||
TRY_STATUS(do_flush_once());
|
||||
|
||||
if ((sync_at_ && sync_at_.is_in_past()) || is_closed) {
|
||||
TRY_STATUS(do_sync());
|
||||
sync_at_ = {};
|
||||
}
|
||||
|
||||
bool need_update = sync_state_.set_synced_size(synced_size_) | sync_state_.set_flushed_size(flushed_size_);
|
||||
if (need_update && callback_) {
|
||||
callback_->on_sync_state_changed();
|
||||
}
|
||||
|
||||
if (reader_.reader_size() == 0 && is_closed) {
|
||||
return true;
|
||||
}
|
||||
|
||||
schedule_sync();
|
||||
return false;
|
||||
}
|
||||
|
||||
void StreamToFileActor::start_up() {
|
||||
schedule_sync();
|
||||
}
|
||||
|
||||
void StreamToFileActor::loop() {
|
||||
auto r_is_closed = do_loop();
|
||||
if (r_is_closed.is_error()) {
|
||||
reader_.close_reader(r_is_closed.move_as_error());
|
||||
return stop();
|
||||
} else if (r_is_closed.ok()) {
|
||||
reader_.close_reader(Status::OK());
|
||||
return stop();
|
||||
}
|
||||
alarm_timestamp() = sync_at_;
|
||||
}
|
||||
} // namespace td
|
73
tddb/td/db/utils/StreamToFileActor.h
Normal file
|
@ -0,0 +1,73 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#pragma once
|
||||
#include "StreamInterface.h"
|
||||
#include "FileSyncState.h"
|
||||
|
||||
#include "td/utils/Time.h"
|
||||
#include "td/utils/port/FileFd.h"
|
||||
|
||||
#include "td/actor/actor.h"
|
||||
|
||||
namespace td {
|
||||
class StreamToFileActor : public actor::Actor {
|
||||
public:
|
||||
struct Options {
|
||||
Options() {
|
||||
}
|
||||
double lazy_sync_delay = 10;
|
||||
double immediate_sync_delay = 0.001;
|
||||
};
|
||||
|
||||
class Callback {
|
||||
public:
|
||||
virtual ~Callback() {
|
||||
}
|
||||
virtual void on_sync_state_changed() = 0;
|
||||
};
|
||||
|
||||
StreamToFileActor(StreamReader reader, FileFd fd, FileSyncState::Writer sync_state, Options options = {});
|
||||
void set_callback(td::unique_ptr<Callback> callback);
|
||||
|
||||
private:
|
||||
StreamReader reader_;
|
||||
FileFd fd_;
|
||||
Timestamp sync_at_;
|
||||
Options options_;
|
||||
FileSyncState::Writer sync_state_;
|
||||
unique_ptr<Callback> callback_;
|
||||
|
||||
size_t flushed_size_{0};
|
||||
size_t synced_size_{0};
|
||||
|
||||
TD_WARN_UNUSED_RESULT Result<bool> is_closed();
|
||||
|
||||
Status do_flush_once();
|
||||
|
||||
Status do_sync();
|
||||
|
||||
void schedule_sync();
|
||||
|
||||
TD_WARN_UNUSED_RESULT Result<bool> do_loop();
|
||||
|
||||
void start_up() override;
|
||||
|
||||
void loop() override;
|
||||
};
|
||||
} // namespace td
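// Illustrative wiring sketch (not part of the original commit): a ChainBuffer carries
// serialized data from a producer to a StreamToFileActor, which writev()s it into the
// file and fsyncs either lazily or as soon as the FileSyncState reader requests it.
// The actor name and the FileFd::open flags are assumptions for the example, and the
// code must run inside a td::actor scheduler context. A real producer would also wake
// the actor after appending; that part is omitted here.
#include "td/db/utils/ChainBuffer.h"
#include "td/db/utils/FileSyncState.h"
#include "td/db/utils/StreamToFileActor.h"

#include "td/actor/actor.h"
#include "td/utils/port/FileFd.h"

#include <utility>

struct AsyncFileWriter {
  td::ChainBuffer::Writer buffer_writer;             // producer appends serialized data here
  td::FileSyncState::Reader sync_state;              // producer requests/observes fsyncs here
  td::actor::ActorOwn<td::StreamToFileActor> actor;  // drains the buffer into the file
};

AsyncFileWriter create_async_file_writer(td::CSlice path) {
  auto buffer = td::ChainBuffer::create();
  auto sync_state = td::FileSyncState::create();
  auto fd = td::FileFd::open(path, td::FileFd::Write | td::FileFd::Create | td::FileFd::Append)
                .move_as_ok();  // assumed flag names

  AsyncFileWriter res;
  res.buffer_writer = std::move(buffer.second);
  res.sync_state = std::move(sync_state.first);
  res.actor = td::actor::create_actor<td::StreamToFileActor>(
      "stream2file", std::move(buffer.first), std::move(fd), std::move(sync_state.second));
  return res;
}

// Producer side: append a record and ask for everything written so far to be synced.
void append_and_request_sync(AsyncFileWriter& writer, td::Slice record, size_t total_size) {
  writer.buffer_writer.append(record);
  writer.sync_state.set_requested_sync_size(total_size);  // takes the immediate_sync_delay path
}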
|
810
tddb/test/binlog.cpp
Normal file
|
@ -0,0 +1,810 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Copyright 2017-2019 Telegram Systems LLP
|
||||
*/
|
||||
#include "td/utils/tests.h"
|
||||
|
||||
#include "td/utils/as.h"
|
||||
#include "td/utils/base64.h"
|
||||
#include "td/utils/benchmark.h"
|
||||
#include "td/utils/buffer.h"
|
||||
#include "td/utils/crypto.h"
|
||||
#include "td/utils/filesystem.h"
|
||||
#include "td/utils/Slice.h"
|
||||
#include "td/utils/Span.h"
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/overloaded.h"
|
||||
#include "td/utils/optional.h"
|
||||
#include "td/utils/port/FileFd.h"
|
||||
#include "td/utils/port/path.h"
|
||||
#include "td/utils/port/IoSlice.h"
|
||||
#include "td/utils/UInt.h"
|
||||
#include "td/utils/Variant.h"
|
||||
#include "td/utils/VectorQueue.h"
|
||||
|
||||
#include "td/actor/actor.h"
|
||||
|
||||
#include "td/db/utils/StreamInterface.h"
|
||||
#include "td/db/utils/ChainBuffer.h"
|
||||
#include "td/db/utils/CyclicBuffer.h"
|
||||
#include "td/db/binlog/BinlogReaderHelper.h"
|
||||
|
||||
#include "td/db/binlog/Binlog.h"
|
||||
|
||||
#include <ctime>
|
||||
|
||||
// Toy Binlog Implementation
|
||||
using td::int64;
|
||||
using td::MutableSlice;
|
||||
using td::Result;
|
||||
using td::Slice;
|
||||
using td::Status;
|
||||
|
||||
using RootHash = td::UInt256;
|
||||
using FileHash = td::UInt256;
|
||||
struct BlockId {
|
||||
int workchain;
|
||||
unsigned seqno;
|
||||
unsigned long long shard;
|
||||
};
|
||||
|
||||
template <class T>
|
||||
Result<int64> memcpy_parse(Slice data, T* res) {
|
||||
if (data.size() < sizeof(T)) {
|
||||
return -static_cast<int64>(sizeof(T));
|
||||
}
|
||||
std::memcpy(res, data.data(), sizeof(T));
|
||||
if (res->tag_field != res->tag) {
|
||||
return Status::Error("Tag mismatch");
|
||||
}
|
||||
return sizeof(T);
|
||||
}
|
||||
template <class T>
|
||||
int64 memcpy_serialize(MutableSlice data, const T& res) {
|
||||
if (data.size() < sizeof(T)) {
|
||||
return -static_cast<int64>(sizeof(T));
|
||||
}
|
||||
std::memcpy(data.data(), &res, sizeof(T));
|
||||
return sizeof(T);
|
||||
}
|
||||
|
||||
#pragma pack(push, 4)
|
||||
struct LogEventCrc32C {
|
||||
static constexpr unsigned tag = 0x473a830a;
|
||||
|
||||
unsigned tag_field;
|
||||
td::uint32 crc32c;
|
||||
LogEventCrc32C() = default;
|
||||
LogEventCrc32C(td::uint32 crc32c) : tag_field(tag), crc32c(crc32c) {
|
||||
}
|
||||
static Result<int64> parse(Slice data, LogEventCrc32C* res) {
|
||||
return memcpy_parse(data, res);
|
||||
}
|
||||
int64 serialize(MutableSlice data) const {
|
||||
return memcpy_serialize(data, *this);
|
||||
}
|
||||
auto key() const {
|
||||
return crc32c;
|
||||
}
|
||||
bool operator==(const LogEventCrc32C& other) const {
|
||||
return key() == other.key();
|
||||
}
|
||||
bool operator!=(const LogEventCrc32C& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
};
|
||||
|
||||
struct LogEventStart {
|
||||
static constexpr unsigned tag = 0x0442446b;
|
||||
static constexpr unsigned log_type = 0x290100;
|
||||
unsigned tag_field;
|
||||
unsigned type_field;
|
||||
unsigned created_at;
|
||||
unsigned char zerostate_root_hash[32];
|
||||
LogEventStart() = default;
|
||||
LogEventStart(const RootHash& hash, unsigned _now = 0)
|
||||
: tag_field(tag), type_field(log_type), created_at(_now ? _now : (unsigned)std::time(nullptr)) {
|
||||
td::as<RootHash>(zerostate_root_hash) = hash;
|
||||
}
|
||||
static Result<int64> parse(Slice data, LogEventStart* res) {
|
||||
return memcpy_parse(data, res);
|
||||
}
|
||||
int64 serialize(MutableSlice data) const {
|
||||
return memcpy_serialize(data, *this);
|
||||
}
|
||||
auto key() const {
|
||||
return std::make_tuple(tag_field, type_field, created_at, Slice(zerostate_root_hash, 32));
|
||||
}
|
||||
bool operator==(const LogEventStart& other) const {
|
||||
return key() == other.key();
|
||||
}
|
||||
bool operator!=(const LogEventStart& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
};
|
||||
|
||||
struct LogEventSetZeroState {
|
||||
static constexpr unsigned tag = 0x63ab3cd9;
|
||||
unsigned tag_field;
|
||||
unsigned flags;
|
||||
long long file_size;
|
||||
unsigned char file_hash[32];
|
||||
unsigned char root_hash[32];
|
||||
LogEventSetZeroState() = default;
|
||||
LogEventSetZeroState(const RootHash& rhash, const FileHash& fhash, unsigned long long _fsize, unsigned _flags = 0)
|
||||
: tag_field(tag), flags(_flags), file_size(_fsize) {
|
||||
td::as<FileHash>(file_hash) = fhash;
|
||||
td::as<RootHash>(root_hash) = rhash;
|
||||
}
|
||||
static Result<int64> parse(Slice data, LogEventSetZeroState* res) {
|
||||
return memcpy_parse(data, res);
|
||||
}
|
||||
int64 serialize(MutableSlice data) const {
|
||||
return memcpy_serialize(data, *this);
|
||||
}
|
||||
auto key() const {
|
||||
return std::make_tuple(tag_field, flags, file_size, Slice(file_hash, 32), Slice(root_hash, 32));
|
||||
}
|
||||
bool operator==(const LogEventSetZeroState& other) const {
|
||||
return key() == other.key();
|
||||
}
|
||||
bool operator!=(const LogEventSetZeroState& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
};
|
||||
|
||||
struct LogEventNewBlock {
|
||||
static constexpr unsigned tag = 0x19f4bc63;
|
||||
unsigned tag_field;
|
||||
unsigned flags; // lower 8 bits = authority
|
||||
int workchain;
|
||||
unsigned seqno;
|
||||
unsigned long long shard;
|
||||
long long file_size;
|
||||
unsigned char file_hash[32];
|
||||
unsigned char root_hash[32];
|
||||
unsigned char last_bytes[8];
|
||||
LogEventNewBlock() = default;
|
||||
LogEventNewBlock(const BlockId& block, const RootHash& rhash, const FileHash& fhash, unsigned long long _fsize,
|
||||
unsigned _flags)
|
||||
: tag_field(tag)
|
||||
, flags(_flags)
|
||||
, workchain(block.workchain)
|
||||
, seqno(block.seqno)
|
||||
, shard(block.shard)
|
||||
, file_size(_fsize) {
|
||||
td::as<FileHash>(file_hash) = fhash;
|
||||
td::as<RootHash>(root_hash) = rhash;
|
||||
td::as<unsigned long long>(last_bytes) = 0;
|
||||
}
|
||||
static Result<int64> parse(Slice data, LogEventNewBlock* res) {
|
||||
return memcpy_parse(data, res);
|
||||
}
|
||||
int64 serialize(MutableSlice data) const {
|
||||
return memcpy_serialize(data, *this);
|
||||
}
|
||||
auto key() const {
|
||||
return std::make_tuple(tag_field, flags, workchain, seqno, shard, file_size, Slice(file_hash, 32),
|
||||
Slice(root_hash, 32), Slice(last_bytes, 8));
|
||||
}
|
||||
bool operator==(const LogEventNewBlock& other) const {
|
||||
return key() == other.key();
|
||||
}
|
||||
bool operator!=(const LogEventNewBlock& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
};
|
||||
|
||||
struct LogEventNewState {
|
||||
static constexpr unsigned tag = 0x4190a21f;
|
||||
unsigned tag_field;
|
||||
unsigned flags; // lower 8 bits = authority
|
||||
int workchain;
|
||||
unsigned seqno;
|
||||
unsigned long long shard;
|
||||
long long file_size;
|
||||
unsigned char file_hash[32];
|
||||
unsigned char root_hash[32];
|
||||
unsigned char last_bytes[8];
|
||||
LogEventNewState() = default;
|
||||
LogEventNewState(const BlockId& state, const RootHash& rhash, const FileHash& fhash, unsigned long long _fsize,
|
||||
unsigned _flags)
|
||||
: tag_field(tag)
|
||||
, flags(_flags)
|
||||
, workchain(state.workchain)
|
||||
, seqno(state.seqno)
|
||||
, shard(state.shard)
|
||||
, file_size(_fsize) {
|
||||
td::as<FileHash>(file_hash) = fhash;
|
||||
td::as<RootHash>(root_hash) = rhash;
|
||||
td::as<unsigned long long>(last_bytes) = 0;
|
||||
}
|
||||
static Result<int64> parse(Slice data, LogEventNewState* res) {
|
||||
return memcpy_parse(data, res);
|
||||
}
|
||||
int64 serialize(MutableSlice data) const {
|
||||
return memcpy_serialize(data, *this);
|
||||
}
|
||||
auto key() const {
|
||||
return std::make_tuple(tag_field, flags, workchain, seqno, shard, file_size, Slice(file_hash, 32),
|
||||
Slice(root_hash, 32), Slice(last_bytes, 8));
|
||||
}
|
||||
bool operator==(const LogEventNewState& other) const {
|
||||
return key() == other.key();
|
||||
}
|
||||
bool operator!=(const LogEventNewState& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
};
|
||||
#pragma pack(pop)
|
||||
|
||||
struct LogEventString {
|
||||
static constexpr unsigned tag = 0xabcdabcd;
|
||||
|
||||
std::string data;
|
||||
|
||||
bool operator==(const LogEventString& other) const {
|
||||
return data == other.data;
|
||||
}
|
||||
bool operator!=(const LogEventString& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
int64 serialize(MutableSlice dest) const {
|
||||
size_t need_size = 8 + data.size();
|
||||
if (dest.size() < need_size) {
|
||||
return -static_cast<int64>(need_size);
|
||||
}
|
||||
dest.truncate(need_size);
|
||||
td::as<unsigned>(dest.data()) = tag;
|
||||
td::as<int>(dest.data() + 4) = td::narrow_cast<int>(data.size());
|
||||
dest.substr(8).copy_from(data);
|
||||
return dest.size();
|
||||
}
|
||||
|
||||
static Result<int64> parse(Slice data, LogEventString* res) {
|
||||
if (data.size() < 4) {
|
||||
return -4;
|
||||
}
|
||||
unsigned got_tag = td::as<unsigned>(data.data());
|
||||
if (got_tag != tag) {
|
||||
return Status::Error(PSLICE() << "tag mismatch " << td::format::as_hex(got_tag));
|
||||
}
|
||||
data = data.substr(4);
|
||||
if (data.size() < 4) {
|
||||
return -8;
|
||||
}
|
||||
td::int64 length = td::as<td::uint32>(data.data());
|
||||
data = data.substr(4);
|
||||
if (static_cast<int64>(data.size()) < length) {
|
||||
return -length - 8;
|
||||
}
|
||||
res->data = data.substr(0, td::narrow_cast<std::size_t>(length)).str();
|
||||
return length + 8;
|
||||
}
|
||||
};
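// Illustrative sketch (not an original test): the wire format produced by
// LogEventString::serialize is [tag:4][length:4][payload:length], so a 3-byte
// payload occupies 11 bytes, and parsing a too-short prefix returns the negative
// number of bytes required.
TEST(Binlog, LogEventStringFormat) {
  LogEventString event;
  event.data = "abc";

  ASSERT_EQ(-11, event.serialize({}));  // empty destination: need 8 + 3 bytes

  std::string buf(11, '\0');
  ASSERT_EQ(11, event.serialize(buf));

  LogEventString parsed;
  ASSERT_EQ(11, LogEventString::parse(buf, &parsed).move_as_ok());
  ASSERT_TRUE(parsed == event);

  // Only 2 bytes available: the parser asks for at least 4 bytes (the tag).
  ASSERT_EQ(-4, LogEventString::parse(Slice(buf).substr(0, 2), &parsed).move_as_ok());
}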
|
||||
|
||||
struct LogEvent {
|
||||
td::Variant<LogEventCrc32C, LogEventStart, LogEventString, LogEventNewBlock, LogEventNewState, LogEventSetZeroState>
|
||||
event_{LogEventStart{}};
|
||||
|
||||
bool operator==(const LogEvent& other) const {
|
||||
return event_ == other.event_;
|
||||
}
|
||||
bool operator!=(const LogEvent& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
LogEvent() = default;
|
||||
LogEvent(LogEvent&& other) = default;
|
||||
template <class T>
|
||||
LogEvent(T&& e) : event_(std::forward<T>(e)) {
|
||||
}
|
||||
|
||||
int64 serialize(MutableSlice data) const {
|
||||
int64 res;
|
||||
event_.visit([&](auto& e) { res = e.serialize(data); });
|
||||
return res;
|
||||
}
|
||||
|
||||
static Result<int64> parse(Slice data, LogEvent* res) {
|
||||
if (data.size() < 4) {
|
||||
return -4;
|
||||
}
|
||||
//LOG(ERROR) << td::format::as_hex_dump<4>(data);
|
||||
unsigned got_tag = td::as<unsigned>(data.data());
|
||||
switch (got_tag) {
|
||||
case LogEventCrc32C::tag: {
|
||||
LogEventCrc32C e;
|
||||
TRY_RESULT(x, e.parse(data, &e));
|
||||
if (x >= 0) {
|
||||
res->event_ = e;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
case LogEventStart::tag: {
|
||||
LogEventStart e;
|
||||
TRY_RESULT(x, e.parse(data, &e));
|
||||
if (x >= 0) {
|
||||
res->event_ = e;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
case LogEventSetZeroState::tag: {
|
||||
LogEventSetZeroState e;
|
||||
TRY_RESULT(x, e.parse(data, &e));
|
||||
if (x >= 0) {
|
||||
res->event_ = e;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
case LogEventNewBlock::tag: {
|
||||
LogEventNewBlock e;
|
||||
TRY_RESULT(x, e.parse(data, &e));
|
||||
if (x >= 0) {
|
||||
res->event_ = e;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
case LogEventNewState::tag: {
|
||||
LogEventNewState e;
|
||||
TRY_RESULT(x, e.parse(data, &e));
|
||||
if (x >= 0) {
|
||||
res->event_ = e;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
case LogEventString::tag: {
|
||||
LogEventString e;
|
||||
TRY_RESULT(x, e.parse(data, &e));
|
||||
if (x >= 0) {
|
||||
res->event_ = e;
|
||||
}
|
||||
return x;
|
||||
}
|
||||
default:
|
||||
return Status::Error(PSLICE() << "Unknown tag: " << td::format::as_hex(got_tag));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
static td::CSlice test_binlog_path("test.binlog");
|
||||
|
||||
class BinlogReader : public td::BinlogReaderInterface {
|
||||
public:
|
||||
td::Span<LogEvent> logevents() const {
|
||||
return logevents_;
|
||||
}
|
||||
|
||||
td::Result<td::int64> parse(td::Slice data) override {
|
||||
if (data.size() < 4) {
|
||||
return -4;
|
||||
}
|
||||
LogEvent res;
|
||||
TRY_RESULT(size, res.parse(data, &res));
|
||||
if (size > 0) {
|
||||
if (res.event_.get_offset() == res.event_.offset<LogEventCrc32C>()) {
|
||||
auto crc = res.event_.get<LogEventCrc32C>().crc32c;
|
||||
flush_crc();
|
||||
if (crc != crc_) {
|
||||
return Status::Error("Crc mismatch");
|
||||
}
|
||||
} else {
|
||||
logevents_.emplace_back(std::move(res));
|
||||
}
|
||||
lazy_crc_extend(data.substr(0, td::narrow_cast<std::size_t>(size)));
|
||||
}
|
||||
return size;
|
||||
}
|
||||
|
||||
td::uint32 crc32c() {
|
||||
flush_crc();
|
||||
return crc_;
|
||||
}
|
||||
|
||||
void flush() override {
|
||||
flush_crc();
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<LogEvent> logevents_;
|
||||
td::uint32 crc_{0};
|
||||
td::Slice suffix_;
|
||||
|
||||
void flush_crc() {
|
||||
crc_ = td::crc32c_extend(crc_, suffix_);
|
||||
suffix_ = Slice();
|
||||
}
|
||||
void lazy_crc_extend(Slice slice) {
|
||||
if (suffix_.empty()) {
|
||||
suffix_ = slice;
|
||||
return;
|
||||
}
|
||||
if (suffix_.end() == slice.begin()) {
|
||||
suffix_ = Slice(suffix_.begin(), slice.end());
|
||||
return;
|
||||
}
|
||||
flush_crc();
|
||||
suffix_ = slice;
|
||||
}
|
||||
};
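// Illustrative sketch (not an original test): BinlogReaderHelper buffers partial
// input, so a logevent may arrive in arbitrarily small pieces and is delivered to
// the BinlogReader only once it is complete.
TEST(Binlog, ReaderHelperChunks) {
  LogEventString event;
  event.data = "hello";
  std::string data(td::narrow_cast<std::size_t>(-event.serialize({})), '\0');
  CHECK(event.serialize(data) == static_cast<int64>(data.size()));

  BinlogReader reader;
  td::BinlogReaderHelper helper;
  // Feed the serialized event one byte at a time.
  for (size_t i = 0; i < data.size(); i++) {
    helper.parse(reader, Slice(data).substr(i, 1)).ensure();
  }

  ASSERT_EQ(1u, reader.logevents().size());
  ASSERT_TRUE(reader.logevents()[0] == LogEvent(event));
}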
|
||||
|
||||
class RandomBinlog {
|
||||
public:
|
||||
RandomBinlog() {
|
||||
size_t logevent_count = 1000;
|
||||
for (size_t i = 0; i < logevent_count; i++) {
|
||||
add_logevent(create_random_logevent());
|
||||
}
|
||||
}
|
||||
|
||||
Slice data() const {
|
||||
return data_;
|
||||
}
|
||||
td::Span<LogEvent> logevents() const {
|
||||
return logevents_;
|
||||
}
|
||||
|
||||
private:
|
||||
std::vector<LogEvent> logevents_;
|
||||
std::string data_;
|
||||
|
||||
template <class T>
|
||||
void add_logevent(T event) {
|
||||
int64 size = -event.serialize({});
|
||||
std::string data(td::narrow_cast<std::size_t>(size), '\0');
|
||||
int64 new_size = event.serialize(data);
|
||||
CHECK(new_size == size);
|
||||
data_ += data;
|
||||
logevents_.emplace_back(std::move(event));
|
||||
}
|
||||
|
||||
LogEvent create_random_logevent() {
|
||||
auto rand_uint256 = [] {
|
||||
td::UInt256 res;
|
||||
td::Random::secure_bytes(as_slice(res));
|
||||
return res;
|
||||
};
|
||||
auto rand_block_id = [] {
|
||||
BlockId res;
|
||||
res.workchain = td::Random::fast(0, 100);
|
||||
res.shard = td::Random::fast(0, 100);
|
||||
res.seqno = td::Random::fast(0, 100);
|
||||
return res;
|
||||
};
|
||||
|
||||
auto type = td::Random::fast(0, 4);
|
||||
switch (type) {
|
||||
case 0: {
|
||||
auto size = td::Random::fast(0, 10);
|
||||
LogEventString event;
|
||||
event.data = td::rand_string('a', 'z', size);
|
||||
return event;
|
||||
}
|
||||
case 1: {
|
||||
return LogEventStart(rand_uint256(), 12);
|
||||
}
|
||||
case 2: {
|
||||
return LogEventSetZeroState(rand_uint256(), rand_uint256(), td::Random::fast(0, 1000),
|
||||
td::Random::fast(0, 1000));
|
||||
}
|
||||
case 3: {
|
||||
return LogEventNewBlock(rand_block_id(), rand_uint256(), rand_uint256(), 12, 17);
|
||||
}
|
||||
case 4: {
|
||||
return LogEventNewState(rand_block_id(), rand_uint256(), rand_uint256(), 12, 17);
|
||||
}
|
||||
}
|
||||
UNREACHABLE();
|
||||
}
|
||||
};
|
||||
|
||||
void test_binlog(td::Slice data, td::optional<td::Span<LogEvent>> events = {}) {
|
||||
auto splitted_binlog = td::rand_split(data);
|
||||
|
||||
std::string new_binlog_data;
|
||||
|
||||
BinlogReader reader;
|
||||
td::BinlogReaderHelper reader_impl;
|
||||
for (auto& chunk : splitted_binlog) {
|
||||
reader_impl.parse(reader, chunk).ensure();
|
||||
}
|
||||
|
||||
//Binlog write sync
|
||||
{
|
||||
td::Binlog::destroy(test_binlog_path);
|
||||
td::BinlogWriter binlog_writer(test_binlog_path.str());
|
||||
binlog_writer.open().ensure();
|
||||
|
||||
BinlogReader new_reader;
|
||||
size_t i = 0;
|
||||
for (auto& logevent : reader.logevents()) {
|
||||
binlog_writer.write_event(logevent, &new_reader).ensure();
|
||||
i++;
|
||||
if (i % 10 == 0) {
|
||||
binlog_writer.write_event(LogEvent(LogEventCrc32C(new_reader.crc32c())), &new_reader).ensure();
|
||||
}
|
||||
}
|
||||
binlog_writer.sync();
|
||||
binlog_writer.close().ensure();
|
||||
|
||||
auto file_data = read_file(test_binlog_path).move_as_ok();
|
||||
ASSERT_TRUE(reader.logevents() == new_reader.logevents());
|
||||
new_binlog_data = file_data.as_slice().str();
|
||||
data = new_binlog_data;
|
||||
//ASSERT_EQ(data, file_data);
|
||||
}
|
||||
|
||||
//Binlog write async
|
||||
{
|
||||
td::Binlog::destroy(test_binlog_path);
|
||||
td::BinlogWriterAsync binlog_writer(test_binlog_path.str());
|
||||
|
||||
td::actor::Scheduler scheduler({2});
|
||||
|
||||
BinlogReader new_reader;
|
||||
scheduler.run_in_context([&]() mutable {
|
||||
binlog_writer.open().ensure();
|
||||
for (auto& logevent : reader.logevents()) {
|
||||
binlog_writer.write_event(logevent, &new_reader).ensure();
|
||||
}
|
||||
binlog_writer.sync([&](Result<td::Unit> res) {
|
||||
res.ensure();
|
||||
binlog_writer.close([&](Result<td::Unit> res) {
|
||||
res.ensure();
|
||||
td::actor::SchedulerContext::get()->stop();
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
scheduler.run();
|
||||
scheduler.stop();
|
||||
|
||||
auto file_data = read_file(test_binlog_path).move_as_ok();
|
||||
ASSERT_TRUE(reader.logevents() == new_reader.logevents());
|
||||
//ASSERT_EQ(data, file_data);
|
||||
}
|
||||
|
||||
ASSERT_TRUE(!events || events.value() == reader.logevents());
|
||||
|
||||
std::string new_data;
|
||||
for (auto& event : reader.logevents()) {
|
||||
int64 size = -event.serialize({});
|
||||
std::string event_data(td::narrow_cast<std::size_t>(size), '\0');
|
||||
int64 new_size = event.serialize(event_data);
|
||||
CHECK(new_size == size);
|
||||
new_data += event_data;
|
||||
}
|
||||
//ASSERT_EQ(data, new_data);
|
||||
|
||||
// Binlog::read_sync
|
||||
{
|
||||
td::CSlice path("test.binlog");
|
||||
td::Binlog::destroy(path);
|
||||
td::write_file(path, data).ensure();
|
||||
|
||||
td::Binlog binlog(path.str());
|
||||
BinlogReader binlog_reader;
|
||||
binlog.replay_sync(binlog_reader).ensure();
|
||||
|
||||
ASSERT_EQ(reader.logevents().size(), binlog_reader.logevents().size());
|
||||
ASSERT_TRUE(reader.logevents() == binlog_reader.logevents());
|
||||
}
|
||||
|
||||
// Binlog::read_async
|
||||
{
|
||||
td::Binlog::destroy(test_binlog_path);
|
||||
td::write_file(test_binlog_path, data).ensure();
|
||||
|
||||
td::Binlog binlog(test_binlog_path.str());
|
||||
auto binlog_reader = std::make_shared<BinlogReader>();
|
||||
|
||||
td::actor::Scheduler scheduler({2});
|
||||
scheduler.run_in_context([&]() mutable {
|
||||
binlog.replay_async(binlog_reader, [](Result<td::Unit> res) {
|
||||
res.ensure();
|
||||
td::actor::SchedulerContext::get()->stop();
|
||||
});
|
||||
});
|
||||
|
||||
scheduler.run();
|
||||
scheduler.stop();
|
||||
|
||||
ASSERT_EQ(reader.logevents().size(), binlog_reader->logevents().size());
|
||||
ASSERT_TRUE(reader.logevents() == binlog_reader->logevents());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Binlog, Reader) {
|
||||
RandomBinlog binlog;
|
||||
test_binlog(binlog.data(), binlog.logevents());
|
||||
}
|
||||
|
||||
TEST(Binlog, Hands) {
|
||||
std::string binlog = td::base64_decode(
|
||||
"a0RCBAABKQCRMn1c2DaJhwrptxburpRtrWI2sjGhVbG29bFO0r8DDtAAExjZPKtjAAAAALwGAAAA"
|
||||
"AAAAFvJq3qfzFCDWap+LUrgBI8sWFayIOQSxkBjV3CWgizHYNomHCum3Fu6ulG2tYjayMaFVsbb1"
|
||||
"sU7SvwMO0AATGGO89BmAAAAA/////wEAAAAAAAAAAAAAgN4RAAAAAAAAa53L4ziGleZ7K+StAsBd"
|
||||
"txMxbHHfuB9SJRFp+BMzXfnGnt8TsgFnig7j/xVRjtIsYUVw0rQZJUC0sWQROj0SHvplIkBV9vMp")
|
||||
.move_as_ok();
|
||||
test_binlog(binlog);
|
||||
}
|
||||
|
||||
TEST(Buffers, CyclicBufferSimple) {
|
||||
{
|
||||
auto reader_writer = td::CyclicBuffer::create();
|
||||
auto reader = std::move(reader_writer.first);
|
||||
auto writer = std::move(reader_writer.second);
|
||||
|
||||
ASSERT_TRUE(!writer.is_reader_closed());
|
||||
reader.close_reader(td::Status::Error(2));
|
||||
ASSERT_TRUE(!reader.is_writer_closed());
|
||||
ASSERT_TRUE(writer.is_reader_closed());
|
||||
ASSERT_EQ(2, writer.reader_status().code());
|
||||
}
|
||||
{
|
||||
auto reader_writer = td::CyclicBuffer::create();
|
||||
auto reader = std::move(reader_writer.first);
|
||||
auto writer = std::move(reader_writer.second);
|
||||
|
||||
ASSERT_TRUE(!reader.is_writer_closed());
|
||||
writer.close_writer(td::Status::Error(2));
|
||||
ASSERT_TRUE(!writer.is_reader_closed());
|
||||
ASSERT_TRUE(reader.is_writer_closed());
|
||||
ASSERT_EQ(2, reader.writer_status().code());
|
||||
}
|
||||
{
|
||||
td::CyclicBuffer::Options options;
|
||||
options.chunk_size = 14;
|
||||
options.count = 10;
|
||||
options.alignment = 7;
|
||||
auto reader_writer = td::CyclicBuffer::create(options);
|
||||
auto reader = std::move(reader_writer.first);
|
||||
auto writer = std::move(reader_writer.second);
|
||||
|
||||
auto data = td::rand_string('a', 'z', 100001);
|
||||
td::Slice write_slice = data;
|
||||
td::Slice read_slice = data;
|
||||
for (size_t i = 1; (int)i < options.count; i++) {
|
||||
ASSERT_EQ((i - 1) * options.chunk_size, reader.reader_size());
|
||||
ASSERT_EQ((i - 1) * options.chunk_size, writer.writer_size());
|
||||
auto slice = writer.prepare_write();
|
||||
ASSERT_EQ(0u, reinterpret_cast<td::uint64>(slice.data()) % options.alignment);
|
||||
auto to_copy = write_slice;
|
||||
to_copy.truncate(options.chunk_size);
|
||||
slice.copy_from(to_copy);
|
||||
write_slice = write_slice.substr(to_copy.size());
|
||||
writer.confirm_write(to_copy.size());
|
||||
ASSERT_EQ(i * options.chunk_size, reader.reader_size());
|
||||
ASSERT_EQ(i * options.chunk_size, writer.writer_size());
|
||||
}
|
||||
bool is_writer_closed = false;
|
||||
while (true) {
|
||||
{
|
||||
bool is_closed = reader.is_writer_closed();
|
||||
auto slice = reader.prepare_read();
|
||||
ASSERT_EQ(read_slice.substr(0, slice.size()), slice);
|
||||
read_slice = read_slice.substr(slice.size());
|
||||
reader.confirm_read(slice.size());
|
||||
if (is_closed && slice.empty()) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!is_writer_closed) {
|
||||
auto slice = writer.prepare_write();
|
||||
auto to_copy = write_slice;
|
||||
to_copy.truncate(options.chunk_size);
|
||||
if (to_copy.empty()) {
|
||||
writer.close_writer(td::Status::OK());
|
||||
is_writer_closed = true;
|
||||
} else {
|
||||
slice.copy_from(to_copy);
|
||||
write_slice = write_slice.substr(to_copy.size());
|
||||
writer.confirm_write(to_copy.size());
|
||||
}
|
||||
}
|
||||
}
|
||||
ASSERT_EQ(0u, write_slice.size());
|
||||
ASSERT_EQ(0u, read_slice.size());
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Buffers, CyclicBuffer) {
|
||||
for (int t = 0; t < 20; t++) {
|
||||
td::CyclicBuffer::Options options;
|
||||
options.chunk_size = 14;
|
||||
options.count = 10;
|
||||
options.alignment = 7;
|
||||
auto reader_writer = td::CyclicBuffer::create(options);
|
||||
auto reader = std::move(reader_writer.first);
|
||||
auto writer = std::move(reader_writer.second);
|
||||
auto data = td::rand_string('a', 'z', 100001);
|
||||
auto chunks = td::rand_split(data);
|
||||
|
||||
size_t chunk_i = 0;
|
||||
std::string res;
|
||||
while (true) {
|
||||
if (td::Random::fast(0, 1) == 0) {
|
||||
bool is_closed = reader.is_writer_closed();
|
||||
auto slice = reader.prepare_read();
|
||||
res += slice.str();
|
||||
reader.confirm_read(slice.size());
|
||||
if (slice.empty() && is_closed) {
|
||||
reader.writer_status().ensure();
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (chunk_i < chunks.size() && td::Random::fast(0, 1) == 0) {
|
||||
auto slice = writer.prepare_write();
|
||||
auto from = Slice(chunks[chunk_i]);
|
||||
auto copy = from.substr(0, slice.size());
|
||||
slice.copy_from(copy);
|
||||
writer.confirm_write(copy.size());
|
||||
auto left = from.substr(copy.size());
|
||||
if (!left.empty()) {
|
||||
chunks[chunk_i] = left.str();
|
||||
} else {
|
||||
chunk_i++;
|
||||
if (chunk_i == chunks.size()) {
|
||||
writer.close_writer(td::Status::OK());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ASSERT_EQ(data, res);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Buffers, ChainBuffer) {
  for (int t = 0; t < 20; t++) {
    td::ChainBuffer::Options options;
    options.chunk_size = 14;
    auto reader_writer = td::ChainBuffer::create(options);
    auto reader = std::move(reader_writer.first);
    auto writer = std::move(reader_writer.second);
    auto data = td::rand_string('a', 'z', 100001);
    auto chunks = td::rand_split(data);

    size_t chunk_i = 0;
    std::string res;
    while (true) {
      if (td::Random::fast(0, 1) == 0) {
        bool is_closed = reader.is_writer_closed();
        Slice slice;
        if (reader.reader_size() != 0) {
          slice = reader.prepare_read();
          res += slice.str();
          reader.confirm_read(slice.size());
        }
        if (slice.empty() && is_closed) {
          reader.writer_status().ensure();
          break;
        }
      }
      if (chunk_i < chunks.size() && td::Random::fast(0, 1) == 0) {
        writer.append(chunks[chunk_i]);
        chunk_i++;
        if (chunk_i == chunks.size()) {
          writer.close_writer(td::Status::OK());
        }
      }
    }
    ASSERT_EQ(data.size(), res.size());
    ASSERT_EQ(data, res);
  }
}

685
tddb/test/io-bench.cpp
Normal file
@@ -0,0 +1,685 @@
/*
    This file is part of TON Blockchain source code.

    TON Blockchain is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public License
    as published by the Free Software Foundation; either version 2
    of the License, or (at your option) any later version.

    TON Blockchain is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with TON Blockchain. If not, see <http://www.gnu.org/licenses/>.

    In addition, as a special exception, the copyright holders give permission
    to link the code of portions of this program with the OpenSSL library.
    You must obey the GNU General Public License in all respects for all
    of the code used other than OpenSSL. If you modify file(s) with this
    exception, you may extend this exception to your version of the file(s),
    but you are not obligated to do so. If you do not wish to do so, delete this
    exception statement from your version. If you delete this exception statement
    from all source files in the program, then also delete it here.

    Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/utils/OptionsParser.h"
#include "td/utils/filesystem.h"
#include "td/utils/port/FileFd.h"
#include "td/utils/Timer.h"
#include "td/utils/crypto.h"
#include "td/utils/BufferedReader.h"
#include "td/utils/optional.h"
#include "td/actor/actor.h"

#include "td/db/utils/StreamInterface.h"
#include "td/db/utils/ChainBuffer.h"
#include "td/db/utils/CyclicBuffer.h"
#include "td/db/utils/FileSyncState.h"
#include "td/db/utils/StreamToFileActor.h"
#include "td/db/utils/FileToStreamActor.h"

#include <cmath>

namespace td {
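// Actor that drains a CyclicBuffer::Reader: it feeds every available slice to the
// callback, asks the producer for more data via want_more(), and stops once the
// writer side is closed and the buffer is empty.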
class AsyncCyclicBufferReader : public td::actor::Actor {
 public:
  class Callback {
   public:
    virtual ~Callback() {
    }
    virtual void want_more() = 0;
    virtual Status process(Slice data) = 0;
    virtual void on_closed(Status status) = 0;
  };
  AsyncCyclicBufferReader(CyclicBuffer::Reader reader, td::unique_ptr<Callback> callback)
      : reader_(std::move(reader)), callback_(std::move(callback)) {
  }

 private:
  CyclicBuffer::Reader reader_;
  td::unique_ptr<Callback> callback_;

  void loop() override {
    while (true) {
      auto data = reader_.prepare_read();
      if (data.empty()) {
        if (reader_.is_writer_closed()) {
          callback_->on_closed(std::move(reader_.writer_status()));
          return stop();
        }
        callback_->want_more();
        return;
      }
      auto status = callback_->process(data);
      if (status.is_error()) {
        callback_->on_closed(std::move(status));
      }
      reader_.confirm_read(data.size());
      //TODO: better condition for want_more. May be reader should decide if it is ready for more writes
      callback_->want_more();
    }
  }
};

}  // namespace td

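// Lightweight checksum sink used by the benchmarks below; it keeps two running
// crc32c values and multiplies them in result(), presumably so the I/O work
// cannot be optimized away.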
class Processor {
 public:
  void process(td::Slice slice) {
    res = crc32c_extend(res, slice);
    res2 = crc32c_extend(res2, slice);
  }
  auto result() {
    return res * res2;
  }

 private:
  td::uint32 res{0};
  td::uint32 res2{0};
};

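// Read benchmarks: read_baseline slurps the whole file at once, read_buffered goes
// through td::BufferedReader with a caller-supplied buffer, and read_async pipes the
// file through FileToStreamActor into a CyclicBuffer drained by AsyncCyclicBufferReader.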
void read_baseline(td::CSlice path) {
  LOG(ERROR) << "BASELINE";
  td::PerfWarningTimer timer("read file");
  auto data = td::read_file(path).move_as_ok();
  timer.reset();

  td::PerfWarningTimer process_timer("process file", 0);
  Processor processor;
  processor.process(data.as_slice());
  process_timer.reset();
  LOG(ERROR) << processor.result();
}

void read_buffered(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "BufferedReader";
  auto fd = td::FileFd::open(path, td::FileFd::Read).move_as_ok();
  td::BufferedReader reader(fd, buffer_size);
  std::vector<char> buf(buffer_size);
  Processor processor;
  while (true) {
    auto slice = td::MutableSlice(&buf[0], buf.size());
    auto size = reader.read(slice).move_as_ok();
    if (size == 0) {
      break;
    }
    processor.process(slice.truncate(size));
  }
  LOG(ERROR) << processor.result();
}

void read_async(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Async";
  auto fd = td::FileFd::open(path, td::FileFd::Read).move_as_ok();
  td::actor::Scheduler scheduler({2});
  scheduler.run_in_context([&] {
    auto reader_writer = td::CyclicBuffer::create();
    //TODO: hide actor
    auto reader =
        td::actor::create_actor<td::FileToStreamActor>("Reader", std::move(fd), std::move(reader_writer.second));
    class Callback : public td::AsyncCyclicBufferReader::Callback {
     public:
      Callback(td::actor::ActorOwn<> reader) : reader_(std::move(reader)) {
      }
      void want_more() override {
        td::actor::send_signals_later(reader_, td::actor::ActorSignals::wakeup());
      }
      td::Status process(td::Slice data) override {
        processor.process(data);
        return td::Status::OK();
      }
      void on_closed(td::Status status) override {
        LOG(ERROR) << processor.result();
        td::actor::SchedulerContext::get()->stop();
      }

     private:
      td::actor::ActorOwn<> reader_;
      Processor processor;
    };
    auto reader_copy = reader.get();
    auto callback = td::make_unique<Callback>(std::move(reader));
    auto processor = td::actor::create_actor<td::AsyncCyclicBufferReader>(
        "BufferReader", std::move(reader_writer.first), std::move(callback));
    class ReaderCallback : public td::FileToStreamActor::Callback {
     public:
      ReaderCallback(td::actor::ActorId<> actor) : actor_(std::move(actor)) {
      }
      void got_more() override {
        td::actor::send_signals_later(actor_, td::actor::ActorSignals::wakeup());
      }

     private:
      td::actor::ActorId<> actor_;
    };
    send_closure(reader_copy, &td::FileToStreamActor::set_callback,
                 td::make_unique<ReaderCallback>(processor.release()));
  });
  scheduler.run();
}

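// O_DIRECT variant: reads into a 4096-byte-aligned region carved out of a static
// buffer, since direct I/O requires aligned destination addresses.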
static char o_direct_buf[100000000];
void read_o_direct(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Direct";
  auto fd = td::FileFd::open(path, td::FileFd::Read | td::FileFd::Direct).move_as_ok();
  size_t align = 4096;
  auto *ptr =
      reinterpret_cast<char *>((reinterpret_cast<std::uintptr_t>(o_direct_buf) + align - 1) & td::bits_negate64(align));

  td::BufferedReader reader(fd, buffer_size);
  Processor processor;
  while (true) {
    auto slice = td::MutableSlice(ptr, buffer_size);
    auto size = reader.read(slice).move_as_ok();
    if (size == 0) {
      break;
    }
    processor.process(slice.truncate(size));
  }
  LOG(ERROR) << processor.result();
}

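// Write benchmarks below share this generator, which emits ~600 MiB of small
// fixed strings (words_[2], a 20-byte string) before reporting completion.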
class DataGenerator {
 public:
  operator bool() const {
    return generated_size < total_size;
  }

  td::string next() {
    auto res = words_[2];
    generated_size += res.size();
    return res;
  }

 private:
  std::vector<std::string> words_{"a", "fjdksalfdfs", std::string(20, 'b'), std::string(1000, 'a')};
  size_t total_size = (1 << 20) * 600;
  size_t generated_size = 0;
};

void write_baseline(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Baseline";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  std::vector<char> buf(buffer_size);

  DataGenerator generator;
  while (generator) {
    auto slice = generator.next();
    fd.write(slice).ensure();
  }
  fd.sync().ensure();
}
void write_buffered(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Buffered";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  std::vector<char> buf(buffer_size);
  size_t data_size{0};

  auto flush = [&]() {
    auto slice = td::Slice(buf.data(), data_size);
    fd.write(slice).ensure();
    //auto io_slice = as_io_slice(slice);
    //fd.writev({&io_slice, 1}).ensure();
    data_size = 0;
  };
  auto append = [&](td::Slice slice) {
    if (data_size + slice.size() > buffer_size) {
      flush();
    }

    td::MutableSlice(buf.data(), buffer_size).substr(data_size).copy_from(slice);
    data_size += slice.size();
  };

  DataGenerator generator;
  while (generator) {
    auto slice = generator.next();
    append(slice);
  }
  flush();
  fd.sync().ensure();
}

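// FileWriter batches appended data into iovec entries: small pieces are copied into
// an internal buffer and merged with the previous entry when possible, large pieces
// are referenced in place, and everything is flushed with a single writev().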
namespace td {

class FileWriter {
 public:
  FileWriter(FileFd fd, size_t buffer_size) : fd_(std::move(fd)), raw_buffer_(buffer_size) {
    reset();
    buffer_slices_.reserve(1024);
    strings_.reserve(1024);
    ios_slices_.reserve(1024);
  }

  void append(std::string data) {
    cached_size_ += data.size();
    if (data.size() <= max_copy_size) {
      append_copy(data);
    } else {
      CHECK(strings_.size() < strings_.capacity());
      strings_.push_back(std::move(data));
      ios_slices_.push_back(as_io_slice(strings_.back()));
      should_merge_ = false;
    }
    try_flush();
  }

  void append(BufferSlice data) {
    cached_size_ += data.size();
    if (data.size() <= max_copy_size) {
      append_copy(data);
    } else {
      buffer_slices_.push_back(std::move(data));
      ios_slices_.push_back(as_io_slice(buffer_slices_.back().as_slice()));
      should_merge_ = false;
    }
    try_flush();
  }

  void append(Slice data) {
    if (data.size() <= max_copy_size) {
      append_copy(data);
      try_flush();
    } else if (data.size() > min_immediate_write_size) {
      ios_slices_.push_back(as_io_slice(data));
      flush();
    } else {
      append(BufferSlice(data));
    }
  }

  void flush() {
    if (ios_slices_.empty()) {
      return;
    }
    flushed_size_ += cached_size_;
    fd_.writev(ios_slices_).ensure();
    reset();
  }

  void sync() {
    flush();
    synced_size_ = flushed_size_;
    fd_.sync().ensure();
  }

  bool may_flush() const {
    return cached_size_ != 0;
  }
  size_t total_size() const {
    return flushed_size() + cached_size_;
  }
  size_t flushed_size() const {
    return flushed_size_;
  }
  size_t synced_size() const {
    return synced_size_;
  }

 private:
  static constexpr size_t max_cached_size = 256 * (1 << 10);
  static constexpr size_t min_immediate_write_size = 32 * (1 << 10);

  FileFd fd_;

  std::vector<char> raw_buffer_;
  size_t max_copy_size = min(raw_buffer_.size() / 8, size_t(4096u));
  MutableSlice buffer_;
  bool should_merge_ = false;

  std::vector<BufferSlice> buffer_slices_;
  std::vector<std::string> strings_;
  std::vector<IoSlice> ios_slices_;
  size_t cached_size_{0};
  size_t flushed_size_{0};
  size_t synced_size_{0};

  void append_copy(Slice data) {
    buffer_.copy_from(data);
    if (should_merge_) {
      auto back = as_slice(ios_slices_.back());
      back = Slice(back.data(), back.size() + data.size());
      ios_slices_.back() = as_io_slice(back);
    } else {
      ios_slices_.push_back(as_io_slice(buffer_.substr(0, data.size())));
      should_merge_ = true;
    }
    buffer_ = buffer_.substr(data.size());
  }

  void reset() {
    buffer_ = MutableSlice(raw_buffer_.data(), raw_buffer_.size());
    buffer_slices_.clear();
    strings_.clear();
    ios_slices_.clear();
    should_merge_ = false;
    cached_size_ = 0;
  }

  bool must_flush() const {
    return buffer_.size() < max_copy_size || ios_slices_.size() == ios_slices_.capacity() ||
           cached_size_ >= max_cached_size;
  }
  void try_flush() {
    if (!must_flush()) {
      return;
    }
    flush();
  }
};

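// Actor variant of the same idea: on every wakeup it pulls new data from a
// ChainBufferReader, packs the available slices into iovecs and writes them out.
// fd_ and reader_ are default-constructed here, and the class appears unused by
// the benchmarks below.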
class AsyncFileWriterActor : public actor::Actor {
 public:
  AsyncFileWriterActor(FileSyncState::Reader state) : state_(std::move(state)) {
    io_slices_.reserve(100);
  }

 private:
  FileFd fd_;
  ChainBufferReader reader_;
  FileSyncState::Reader state_;
  std::vector<IoSlice> io_slices_;

  size_t flushed_size_{0};
  size_t synced_size_{0};

  void flush() {
    reader_.sync_with_writer();
    while (!reader_.empty()) {
      auto it = reader_.clone();
      size_t io_slices_size = 0;
      while (!it.empty() && io_slices_.size() < io_slices_.capacity()) {
        auto slice = it.prepare_read();
        io_slices_.push_back(as_io_slice(slice));
        io_slices_size += slice.size();
        it.confirm_read(slice.size());
      }
      if (!io_slices_.empty()) {
        auto r_written = fd_.writev(io_slices_);
        LOG_IF(FATAL, r_written.is_error()) << r_written.error();
        auto written = r_written.move_as_ok();
        CHECK(written == io_slices_size);
        flushed_size_ += written;
        io_slices_.clear();
      }
      reader_ = std::move(it);
    }
  }

  void loop() override {
    reader_.sync_with_writer();
    flush();
  }
};

}  // namespace td

void write_vector(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "io vector";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  td::FileWriter writer(std::move(fd), buffer_size);

  DataGenerator generator;
  while (generator) {
    auto slice = generator.next();
    writer.append(std::move(slice));
  }
  writer.sync();
}

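// Asynchronous write path: data is appended to a ChainBuffer, a StreamToFileActor
// drains it to disk, and a FileSyncState channel reports flushed/synced sizes back
// so the producer can throttle itself and request a final fsync.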
void write_async(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Async";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  td::actor::Scheduler scheduler({1});
  scheduler.run_in_context([&] {
    class Writer : public td::actor::Actor {
     public:
      Writer(td::FileFd fd, size_t buffer_size) : fd_(std::move(fd)), buffer_size_(buffer_size) {
      }
      class Callback : public td::StreamToFileActor::Callback {
       public:
        Callback(td::actor::ActorShared<> parent) : parent_(std::move(parent)) {
        }
        void on_sync_state_changed() override {
          td::actor::send_signals_later(parent_, td::actor::ActorSignals::wakeup());
        }

       private:
        td::actor::ActorShared<> parent_;
      };

      void start_up() override {
        auto buffer_reader_writer = td::ChainBuffer::create();
        buffer_writer_ = std::move(buffer_reader_writer.second);
        auto buffer_reader = std::move(buffer_reader_writer.first);

        auto sync_state_reader_writer = td::FileSyncState::create();
        fd_sync_state_ = std::move(sync_state_reader_writer.first);
        auto sync_state_writer = std::move(sync_state_reader_writer.second);
        auto options = td::StreamToFileActor::Options{};
        writer_ = td::actor::create_actor<td::StreamToFileActor>(td::actor::ActorOptions().with_name("FileWriterActor"),
                                                                 std::move(buffer_reader), std::move(fd_),
                                                                 std::move(sync_state_writer), options);
        send_closure(writer_, &td::StreamToFileActor::set_callback, td::make_unique<Callback>(actor_shared(this)));
        loop();
      }

     private:
      td::FileFd fd_;
      td::optional<td::ChainBuffer::Writer> buffer_writer_;
      td::optional<td::FileSyncState::Reader> fd_sync_state_;
      td::actor::ActorOwn<td::StreamToFileActor> writer_;
      size_t buffer_size_;
      DataGenerator generator_;
      size_t total_size_{0};
      bool was_sync_{false};

      void loop() override {
        auto flushed_size = fd_sync_state_.value().flushed_size();
        while (generator_ && total_size_ < flushed_size + buffer_size_ * 10) {
          auto str = generator_.next();
          total_size_ += str.size();
          buffer_writer_.value().append(str);
        }
        td::actor::send_signals_later(writer_, td::actor::ActorSignals::wakeup());
        if (generator_) {
          return;
        } else if (!was_sync_) {
          was_sync_ = true;
          fd_sync_state_.value().set_requested_sync_size(total_size_);
          td::actor::send_signals_later(writer_, td::actor::ActorSignals::wakeup());
        }
        if (fd_sync_state_.value().synced_size() == total_size_) {
          writer_.reset();
        }
      }
      void hangup_shared() override {
        td::actor::SchedulerContext::get()->stop();
        stop();
      }
    };
    td::actor::create_actor<Writer>("Writer", std::move(fd), buffer_size).release();
  });
  scheduler.run();
}

void write_async2(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Async2";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  td::actor::Scheduler scheduler({1});
  scheduler.run_in_context([&] {
    class Worker : public td::actor::Actor {
     public:
      Worker(td::FileFd fd, td::ChainBufferReader reader, td::actor::ActorShared<> parent)
          : fd_(std::move(fd)), reader_(std::move(reader)), parent_(std::move(parent)) {
      }

     private:
      td::FileFd fd_;
      td::ChainBufferReader reader_;
      td::actor::ActorShared<> parent_;
      void loop() override {
        reader_.sync_with_writer();
        while (!reader_.empty()) {
          auto slice = reader_.prepare_read();
          fd_.write(slice).ensure();
          reader_.confirm_read(slice.size());
        }
      }
      void hangup() override {
        loop();
        fd_.sync().ensure();
        stop();
      }
    };
    class Writer : public td::actor::Actor {
     public:
      Writer(td::FileFd fd) : fd_(std::move(fd)) {
      }

     private:
      td::FileFd fd_;
      td::actor::ActorOwn<> worker_;
      td::ChainBufferWriter writer_;
      DataGenerator generator_;

      void start_up() override {
        worker_ =
            td::actor::create_actor<Worker>("Worker", std::move(fd_), writer_.extract_reader(), actor_shared(this));
        while (generator_) {
          writer_.append(generator_.next(), 65536);
          send_signals_later(worker_, td::actor::ActorSignals::wakeup());
        }
        worker_.reset();
      }
      void hangup_shared() override {
        td::actor::SchedulerContext::get()->stop();
        stop();
      }
    };
    td::actor::create_actor<Writer>(td::actor::ActorOptions().with_name("Writer").with_poll(), std::move(fd)).release();
  });
  scheduler.run();
}

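// Benchmark driver. Options: -f <path>, -m <mode 0..5> (0 Baseline, 1 Buffered,
// 2 Direct, 3 Async, 4 WriteV, 5 Async2), -b <buffer size in bytes>. Type is
// currently hard-coded to Write, so the Read branch below is unreachable without
// editing the source. Illustrative invocation: io-bench -f /tmp/out.bin -m 4 -b 65536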
int main(int argc, char **argv) {
  std::string from;
  enum Type { Read, Write };
  Type type{Write};
  enum Mode { Baseline, Buffered, Direct, Async, WriteV, Async2 };
  Mode mode = Baseline;
  size_t buffer_size = 1024;

  td::OptionsParser options_parser;
  options_parser.add_option('f', td::Slice("from"), td::Slice("read from file"), [&](td::Slice arg) -> td::Status {
    from = arg.str();
    return td::Status::OK();
  });
  options_parser.add_option('m', td::Slice("mode"), td::Slice("mode"), [&](td::Slice arg) -> td::Status {
    TRY_RESULT(x, td::to_integer_safe<int>(arg));
    switch (x) {
      case 0:
        mode = Baseline;
        return td::Status::OK();
      case 1:
        mode = Buffered;
        return td::Status::OK();
      case 2:
        mode = Direct;
        return td::Status::OK();
      case 3:
        mode = Async;
        return td::Status::OK();
      case 4:
        mode = WriteV;
        return td::Status::OK();
      case 5:
        mode = Async2;
        return td::Status::OK();
    }
    return td::Status::Error("unknown mode");
  });
  options_parser.add_option('b', td::Slice("buffer"), td::Slice("buffer size"), [&](td::Slice arg) -> td::Status {
    TRY_RESULT(x, td::to_integer_safe<size_t>(arg));
    buffer_size = x;
    return td::Status::OK();
  });

  auto status = options_parser.run(argc, argv);
  if (status.is_error()) {
    LOG(ERROR) << status.error() << "\n" << options_parser;
    return 0;
  }

  switch (type) {
    case Read:
      switch (mode) {
        case Baseline:
          read_baseline(from);
          break;
        case Buffered:
          read_buffered(from, buffer_size);
          break;
        case Direct:
          read_o_direct(from, buffer_size);
          break;
        case Async:
          read_async(from, buffer_size);
          break;
        case Async2:
        case WriteV:
          LOG(FATAL) << "Not supported mode for Read test";
      }
      break;
    case Write:
      switch (mode) {
        case Baseline:
          write_baseline(from, buffer_size);
          break;
        case Buffered:
          write_buffered(from, buffer_size);
          break;
        case WriteV:
          write_vector(from, buffer_size);
          break;
        case Async:
          write_async(from, buffer_size);
          break;
        case Async2:
          write_async2(from, buffer_size);
          break;
        case Direct:
          LOG(FATAL) << "Unimplemented";
      }
  }

  return 0;
}

245
tddb/test/key_value.cpp
Normal file
@@ -0,0 +1,245 @@
/*
    This file is part of TON Blockchain Library.

    TON Blockchain Library is free software: you can redistribute it and/or modify
    it under the terms of the GNU Lesser General Public License as published by
    the Free Software Foundation, either version 2 of the License, or
    (at your option) any later version.

    TON Blockchain Library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public License
    along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.

    Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/utils/tests.h"

#include "td/db/KeyValueAsync.h"
#include "td/db/KeyValue.h"
#include "td/db/RocksDb.h"

#include "td/utils/benchmark.h"
#include "td/utils/buffer.h"
#include "td/utils/optional.h"
#include "td/utils/UInt.h"

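// Smoke test for the synchronous KeyValue interface backed by RocksDb: set/get of a
// short string key and of a binary UInt128 key, plus a reopen to check persistence.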
TEST(KeyValue, simple) {
  td::Slice db_name = "testdb";
  td::RocksDb::destroy(db_name).ignore();

  std::unique_ptr<td::KeyValue> kv = std::make_unique<td::RocksDb>(td::RocksDb::open(db_name.str()).move_as_ok());
  auto set_value = [&](td::Slice key, td::Slice value) { kv->set(key, value); };
  auto ensure_value = [&](td::Slice key, td::Slice value) {
    std::string kv_value;
    auto status = kv->get(key, kv_value).move_as_ok();
    ASSERT_EQ(td::int32(status), td::int32(td::KeyValue::GetStatus::Ok));
    ASSERT_EQ(kv_value, value);
  };
  auto ensure_no_value = [&](td::Slice key) {
    std::string kv_value;
    auto status = kv->get(key, kv_value).move_as_ok();
    ASSERT_EQ(td::int32(status), td::int32(td::KeyValue::GetStatus::NotFound));
  };

  ensure_no_value("A");
  set_value("A", "HELLO");
  ensure_value("A", "HELLO");

  td::UInt128 x;
  std::fill(as_slice(x).begin(), as_slice(x).end(), '1');
  x.raw[5] = 0;
  set_value(as_slice(x), as_slice(x));
  ensure_value(as_slice(x), as_slice(x));

  kv.reset();
  kv = std::make_unique<td::RocksDb>(td::RocksDb::open(db_name.str()).move_as_ok());
  ensure_value("A", "HELLO");
  ensure_value(as_slice(x), as_slice(x));
};

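// Asynchronous variant: a Worker actor issues 10000 random 1 KiB writes in batches
// of 10 through KeyValueAsync and logs how long the stores take to complete.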
TEST(KeyValue, async_simple) {
  td::Slice db_name = "testdb";
  td::RocksDb::destroy(db_name).ignore();

  td::actor::Scheduler scheduler({6});
  auto watcher = td::create_shared_destructor([] { td::actor::SchedulerContext::get()->stop(); });

  class Worker : public td::actor::Actor {
   public:
    Worker(std::shared_ptr<td::Destructor> watcher, std::string db_name)
        : watcher_(std::move(watcher)), db_name_(std::move(db_name)) {
    }
    void start_up() override {
      loop();
    }
    void tear_down() override {
    }
    void loop() override {
      if (!kv_) {
        kv_ = td::KeyValueAsync<td::UInt128, td::BufferSlice>(
            std::make_unique<td::RocksDb>(td::RocksDb::open(db_name_).move_as_ok()));
        set_start_at_ = td::Timestamp::now();
      }
      if (next_set_ && next_set_.is_in_past()) {
        for (size_t i = 0; i < 10 && left_cnt_ > 0; i++, left_cnt_--) {
          do_set();
        }
        if (left_cnt_ > 0) {
          next_set_ = td::Timestamp::in(0.001);
          alarm_timestamp() = next_set_;
        } else {
          next_set_ = td::Timestamp::never();
          set_finish_at_ = td::Timestamp::now();
        }
      }
    }

   private:
    std::shared_ptr<td::Destructor> watcher_;
    td::optional<td::KeyValueAsync<td::UInt128, td::BufferSlice>> kv_;
    std::string db_name_;
    int left_cnt_ = 10000;
    int pending_cnt_ = left_cnt_;
    td::Timestamp next_set_ = td::Timestamp::now();
    td::Timestamp set_start_at_;
    td::Timestamp set_finish_at_;

    void do_set() {
      td::UInt128 key;
      td::Random::secure_bytes(as_slice(key));
      td::BufferSlice data(1024);
      td::Random::secure_bytes(as_slice(data));
      kv_.value().set(key, std::move(data), [actor_id = actor_id(this)](td::Result<td::Unit> res) {
        res.ensure();
        send_closure(actor_id, &Worker::on_stored);
      });
    }

    void on_stored() {
      pending_cnt_--;
      if (pending_cnt_ == 0) {
        auto now = td::Timestamp::now();
        LOG(ERROR) << (now.at() - set_finish_at_.at());
        LOG(ERROR) << (set_finish_at_.at() - set_start_at_.at());
        stop();
      }
    }
  };

  scheduler.run_in_context([watcher = std::move(watcher), &db_name]() mutable {
    td::actor::create_actor<Worker>("Worker", watcher, db_name.str()).release();
    watcher.reset();
  });

  scheduler.run();
};

class KeyValueBenchmark : public td::Benchmark {
 public:
  std::string get_description() const override {
    return "kv transaction benchmark";
  }

  void start_up() override {
    td::RocksDb::destroy("ttt");
    db_ = td::RocksDb::open("ttt").move_as_ok();
  }
  void tear_down() override {
    db_ = {};
  }
  void run(int n) override {
    for (int i = 0; i < n; i++) {
      db_.value().begin_transaction();
      db_.value().set(PSLICE() << i, PSLICE() << i);
      db_.value().commit_transaction();
    }
  }

 private:
  td::optional<td::RocksDb> db_;
};

TEST(KeyValue, Bench) {
  td::bench(KeyValueBenchmark());
}

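// Long-running stress variant of the async test (note the early return disables it
// by default); keys are drawn from a tiny key space so the same rows are rewritten.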
TEST(KeyValue, Stress) {
  return;
  td::Slice db_name = "testdb";
  td::RocksDb::destroy(db_name).ignore();

  td::actor::Scheduler scheduler({6});
  auto watcher = td::create_shared_destructor([] { td::actor::SchedulerContext::get()->stop(); });

  class Worker : public td::actor::Actor {
   public:
    Worker(std::shared_ptr<td::Destructor> watcher, std::string db_name)
        : watcher_(std::move(watcher)), db_name_(std::move(db_name)) {
    }
    void start_up() override {
      loop();
    }
    void tear_down() override {
    }
    void loop() override {
      if (!kv_) {
        kv_ = td::KeyValueAsync<td::UInt128, td::BufferSlice>(
            std::make_unique<td::RocksDb>(td::RocksDb::open(db_name_).move_as_ok()));
        set_start_at_ = td::Timestamp::now();
      }
      if (next_set_ && next_set_.is_in_past()) {
        for (size_t i = 0; i < 10 && left_cnt_ > 0; i++, left_cnt_--) {
          do_set();
        }
        if (left_cnt_ > 0) {
          next_set_ = td::Timestamp::in(0.01);
          alarm_timestamp() = next_set_;
        } else {
          next_set_ = td::Timestamp::never();
          set_finish_at_ = td::Timestamp::now();
        }
      }
    }

   private:
    std::shared_ptr<td::Destructor> watcher_;
    td::optional<td::KeyValueAsync<td::UInt128, td::BufferSlice>> kv_;
    std::string db_name_;
    int left_cnt_ = 1000000000;
    int pending_cnt_ = left_cnt_;
    td::Timestamp next_set_ = td::Timestamp::now();
    td::Timestamp set_start_at_;
    td::Timestamp set_finish_at_;

    void do_set() {
      td::UInt128 key = td::UInt128::zero();
      td::Random::secure_bytes(as_slice(key).substr(0, 1));
      td::BufferSlice data(1024);
      td::Random::secure_bytes(as_slice(data));
      kv_.value().set(key, std::move(data), [actor_id = actor_id(this)](td::Result<td::Unit> res) {
        res.ensure();
        send_closure(actor_id, &Worker::on_stored);
      });
    }

    void on_stored() {
      pending_cnt_--;
      if (pending_cnt_ == 0) {
        auto now = td::Timestamp::now();
        LOG(ERROR) << (now.at() - set_finish_at_.at());
        LOG(ERROR) << (set_finish_at_.at() - set_start_at_.at());
        stop();
      }
    }
  };
  scheduler.run_in_context([watcher = std::move(watcher), &db_name]() mutable {
    td::actor::create_actor<Worker>("Worker", watcher, db_name.str()).release();
    watcher.reset();
  });

  scheduler.run();
}