1
0
Fork 0
mirror of https://github.com/ton-blockchain/ton synced 2025-03-09 15:40:10 +00:00

initial commit

This commit is contained in:
initial commit 2019-09-07 14:03:22 +04:00 committed by vvaltman
commit c2da007f40
1610 changed files with 398047 additions and 0 deletions

810
tddb/test/binlog.cpp Normal file
View file

@ -0,0 +1,810 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/utils/tests.h"
#include "td/utils/as.h"
#include "td/utils/base64.h"
#include "td/utils/benchmark.h"
#include "td/utils/buffer.h"
#include "td/utils/crypto.h"
#include "td/utils/filesystem.h"
#include "td/utils/Slice.h"
#include "td/utils/Span.h"
#include "td/utils/misc.h"
#include "td/utils/overloaded.h"
#include "td/utils/optional.h"
#include "td/utils/port/FileFd.h"
#include "td/utils/port/path.h"
#include "td/utils/port/IoSlice.h"
#include "td/utils/UInt.h"
#include "td/utils/Variant.h"
#include "td/utils/VectorQueue.h"
#include "td/actor/actor.h"
#include "td/db/utils/StreamInterface.h"
#include "td/db/utils/ChainBuffer.h"
#include "td/db/utils/CyclicBuffer.h"
#include "td/db/binlog/BinlogReaderHelper.h"
#include "td/db/binlog/Binlog.h"
#include <ctime>
// Toy Binlog Implementation
using td::int64;
using td::MutableSlice;
using td::Result;
using td::Slice;
using td::Status;
using RootHash = td::UInt256;
using FileHash = td::UInt256;
// Minimal block identifier used by the toy logevents below.
struct BlockId {
  int workchain;
  unsigned seqno;
  unsigned long long shard;
};
// Deserialize a trivially-copyable event T from the front of `data`.
// Returns the number of bytes consumed, -sizeof(T) when more input is
// needed, or an error when the stored tag does not match T::tag.
template <class T>
Result<int64> memcpy_parse(Slice data, T* res) {
  constexpr auto need = sizeof(T);
  if (data.size() < need) {
    return -static_cast<int64>(need);
  }
  std::memcpy(res, data.data(), need);
  if (res->tag_field != res->tag) {
    return Status::Error("Tag mismatch");
  }
  return static_cast<int64>(need);
}
// Serialize a trivially-copyable event into `data`.
// Returns sizeof(T) on success, or -sizeof(T) when the buffer is too small.
template <class T>
int64 memcpy_serialize(MutableSlice data, const T& res) {
  constexpr auto need = sizeof(T);
  if (data.size() < need) {
    return -static_cast<int64>(need);
  }
  std::memcpy(data.data(), &res, need);
  return static_cast<int64>(need);
}
#pragma pack(push, 4)
// Checkpoint event carrying the CRC32C of every binlog byte that precedes
// it. memcpy-serialized: member order and packing ARE the wire format.
struct LogEventCrc32C {
  static constexpr unsigned tag = 0x473a830a;
  unsigned tag_field;  // always == tag on the wire
  td::uint32 crc32c;   // running checksum of the preceding binlog prefix
  LogEventCrc32C() = default;
  LogEventCrc32C(td::uint32 crc32c) : tag_field(tag), crc32c(crc32c) {
  }
  static Result<int64> parse(Slice data, LogEventCrc32C* res) {
    return memcpy_parse(data, res);
  }
  int64 serialize(MutableSlice data) const {
    return memcpy_serialize(data, *this);
  }
  // Comparison key; equality is defined via key() below.
  auto key() const {
    return crc32c;
  }
  bool operator==(const LogEventCrc32C& other) const {
    return key() == other.key();
  }
  bool operator!=(const LogEventCrc32C& other) const {
    return !(*this == other);
  }
};
// First event of a binlog: log type, creation time and zerostate root hash.
// memcpy-serialized: member order and packing ARE the wire format.
struct LogEventStart {
  static constexpr unsigned tag = 0x0442446b;
  static constexpr unsigned log_type = 0x290100;
  unsigned tag_field;   // always == tag
  unsigned type_field;  // always == log_type
  unsigned created_at;  // unix time; ctor substitutes "now" when 0 is passed
  unsigned char zerostate_root_hash[32];
  LogEventStart() = default;
  LogEventStart(const RootHash& hash, unsigned _now = 0)
      : tag_field(tag), type_field(log_type), created_at(_now ? _now : (unsigned)std::time(nullptr)) {
    td::as<RootHash>(zerostate_root_hash) = hash;
  }
  static Result<int64> parse(Slice data, LogEventStart* res) {
    return memcpy_parse(data, res);
  }
  int64 serialize(MutableSlice data) const {
    return memcpy_serialize(data, *this);
  }
  // Tuple of all fields; used to implement equality.
  auto key() const {
    return std::make_tuple(tag_field, type_field, created_at, Slice(zerostate_root_hash, 32));
  }
  bool operator==(const LogEventStart& other) const {
    return key() == other.key();
  }
  bool operator!=(const LogEventStart& other) const {
    return !(*this == other);
  }
};
// Records the zerostate file: size plus file and root hashes.
// memcpy-serialized: member order and packing ARE the wire format.
struct LogEventSetZeroState {
  static constexpr unsigned tag = 0x63ab3cd9;
  unsigned tag_field;  // always == tag
  unsigned flags;
  long long file_size;
  unsigned char file_hash[32];
  unsigned char root_hash[32];
  LogEventSetZeroState() = default;
  LogEventSetZeroState(const RootHash& rhash, const FileHash& fhash, unsigned long long _fsize, unsigned _flags = 0)
      : tag_field(tag), flags(_flags), file_size(_fsize) {
    td::as<FileHash>(file_hash) = fhash;
    td::as<RootHash>(root_hash) = rhash;
  }
  static Result<int64> parse(Slice data, LogEventSetZeroState* res) {
    return memcpy_parse(data, res);
  }
  int64 serialize(MutableSlice data) const {
    return memcpy_serialize(data, *this);
  }
  // Tuple of all fields; used to implement equality.
  auto key() const {
    return std::make_tuple(tag_field, flags, file_size, Slice(file_hash, 32), Slice(root_hash, 32));
  }
  bool operator==(const LogEventSetZeroState& other) const {
    return key() == other.key();
  }
  bool operator!=(const LogEventSetZeroState& other) const {
    return !(*this == other);
  }
};
// Records a newly stored block: its id, file size and hashes.
// memcpy-serialized: member order and packing ARE the wire format.
struct LogEventNewBlock {
  static constexpr unsigned tag = 0x19f4bc63;
  unsigned tag_field;  // always == tag
  unsigned flags;      // lower 8 bits = authority
  int workchain;
  unsigned seqno;
  unsigned long long shard;
  long long file_size;
  unsigned char file_hash[32];
  unsigned char root_hash[32];
  unsigned char last_bytes[8];  // zeroed by the ctor
  LogEventNewBlock() = default;
  LogEventNewBlock(const BlockId& block, const RootHash& rhash, const FileHash& fhash, unsigned long long _fsize,
                   unsigned _flags)
      : tag_field(tag)
      , flags(_flags)
      , workchain(block.workchain)
      , seqno(block.seqno)
      , shard(block.shard)
      , file_size(_fsize) {
    td::as<FileHash>(file_hash) = fhash;
    td::as<RootHash>(root_hash) = rhash;
    td::as<unsigned long long>(last_bytes) = 0;
  }
  static Result<int64> parse(Slice data, LogEventNewBlock* res) {
    return memcpy_parse(data, res);
  }
  int64 serialize(MutableSlice data) const {
    return memcpy_serialize(data, *this);
  }
  // Tuple of all fields; used to implement equality.
  auto key() const {
    return std::make_tuple(tag_field, flags, workchain, seqno, shard, file_size, Slice(file_hash, 32),
                           Slice(root_hash, 32), Slice(last_bytes, 8));
  }
  bool operator==(const LogEventNewBlock& other) const {
    return key() == other.key();
  }
  bool operator!=(const LogEventNewBlock& other) const {
    return !(*this == other);
  }
};
// Records a newly stored state; same layout as LogEventNewBlock but a
// distinct tag. memcpy-serialized: member order/packing ARE the wire format.
struct LogEventNewState {
  static constexpr unsigned tag = 0x4190a21f;
  unsigned tag_field;  // always == tag
  unsigned flags;      // lower 8 bits = authority
  int workchain;
  unsigned seqno;
  unsigned long long shard;
  long long file_size;
  unsigned char file_hash[32];
  unsigned char root_hash[32];
  unsigned char last_bytes[8];  // zeroed by the ctor
  LogEventNewState() = default;
  LogEventNewState(const BlockId& state, const RootHash& rhash, const FileHash& fhash, unsigned long long _fsize,
                   unsigned _flags)
      : tag_field(tag)
      , flags(_flags)
      , workchain(state.workchain)
      , seqno(state.seqno)
      , shard(state.shard)
      , file_size(_fsize) {
    td::as<FileHash>(file_hash) = fhash;
    td::as<RootHash>(root_hash) = rhash;
    td::as<unsigned long long>(last_bytes) = 0;
  }
  static Result<int64> parse(Slice data, LogEventNewState* res) {
    return memcpy_parse(data, res);
  }
  int64 serialize(MutableSlice data) const {
    return memcpy_serialize(data, *this);
  }
  // Tuple of all fields; used to implement equality.
  auto key() const {
    return std::make_tuple(tag_field, flags, workchain, seqno, shard, file_size, Slice(file_hash, 32),
                           Slice(root_hash, 32), Slice(last_bytes, 8));
  }
  bool operator==(const LogEventNewState& other) const {
    return key() == other.key();
  }
  bool operator!=(const LogEventNewState& other) const {
    return !(*this == other);
  }
};
#pragma pack(pop)
// Variable-length event. Wire format: [tag:4][length:4][length bytes].
// Hand-serialized, so it lives outside the #pragma pack region.
struct LogEventString {
  static constexpr unsigned tag = 0xabcdabcd;
  std::string data;
  bool operator==(const LogEventString& other) const {
    return data == other.data;
  }
  bool operator!=(const LogEventString& other) const {
    return !(*this == other);
  }
  // Serialize into dest. Returns the total encoded size, or its negation
  // when dest is too small (caller retries with a bigger buffer).
  int64 serialize(MutableSlice dest) const {
    size_t need_size = 8 + data.size();
    if (dest.size() < need_size) {
      return -static_cast<int64>(need_size);
    }
    dest.truncate(need_size);
    td::as<unsigned>(dest.data()) = tag;
    td::as<int>(dest.data() + 4) = td::narrow_cast<int>(data.size());
    dest.substr(8).copy_from(data);
    return dest.size();
  }
  // Parse one event. A negative return means "need at least that many bytes";
  // a tag mismatch is a hard error.
  static Result<int64> parse(Slice data, LogEventString* res) {
    if (data.size() < 4) {
      return -4;
    }
    unsigned got_tag = td::as<unsigned>(data.data());
    if (got_tag != tag) {
      return Status::Error(PSLICE() << "tag mismatch " << td::format::as_hex(got_tag));
    }
    data = data.substr(4);
    if (data.size() < 4) {
      return -8;
    }
    td::int64 length = td::as<td::uint32>(data.data());
    data = data.substr(4);
    if (static_cast<int64>(data.size()) < length) {
      return -length - 8;  // total frame is 8 header bytes + payload
    }
    res->data = data.substr(0, td::narrow_cast<std::size_t>(length)).str();
    return length + 8;
  }
};
// Tagged union over all known logevent types.
struct LogEvent {
  td::Variant<LogEventCrc32C, LogEventStart, LogEventString, LogEventNewBlock, LogEventNewState, LogEventSetZeroState>
      event_{LogEventStart{}};
  bool operator==(const LogEvent& other) const {
    return event_ == other.event_;
  }
  bool operator!=(const LogEvent& other) const {
    return !(*this == other);
  }
  LogEvent() = default;
  LogEvent(LogEvent&& other) = default;
  template <class T>
  LogEvent(T&& e) : event_(std::forward<T>(e)) {
  }
  // Serialize whichever alternative is currently stored.
  int64 serialize(MutableSlice data) const {
    int64 res;
    event_.visit([&](auto& e) { res = e.serialize(data); });
    return res;
  }
  // Parse one logevent, dispatching on the leading 4-byte tag. Returns bytes
  // consumed, a negative value when more input is required, or an error for
  // an unknown tag.
  static Result<int64> parse(Slice data, LogEvent* res) {
    if (data.size() < 4) {
      return -4;
    }
    unsigned got_tag = td::as<unsigned>(data.data());
    switch (got_tag) {
      case LogEventCrc32C::tag:
        return parse_as<LogEventCrc32C>(data, res);
      case LogEventStart::tag:
        return parse_as<LogEventStart>(data, res);
      case LogEventSetZeroState::tag:
        return parse_as<LogEventSetZeroState>(data, res);
      case LogEventNewBlock::tag:
        return parse_as<LogEventNewBlock>(data, res);
      case LogEventNewState::tag:
        return parse_as<LogEventNewState>(data, res);
      case LogEventString::tag:
        return parse_as<LogEventString>(data, res);
      default:
        return Status::Error(PSLICE() << "Unknown tag: " << td::format::as_hex(got_tag));
    }
  }

 private:
  // Shared body of every switch case above: parse as the concrete event type
  // and store it on success (a negative result means "need more data").
  template <class T>
  static Result<int64> parse_as(Slice data, LogEvent* res) {
    T e;
    TRY_RESULT(x, T::parse(data, &e));
    if (x >= 0) {
      res->event_ = std::move(e);
    }
    return x;
  }
};
// On-disk path shared by all binlog round-trip tests below.
static td::CSlice test_binlog_path("test.binlog");
// BinlogReaderInterface implementation that collects parsed events and keeps
// a running CRC32C over every byte it has consumed.
class BinlogReader : public td::BinlogReaderInterface {
 public:
  td::Span<LogEvent> logevents() const {
    return logevents_;
  }
  // Parse a single logevent from data. Returns bytes consumed or a negative
  // value when more input is needed. Crc32C checkpoint events are validated
  // against the running checksum and are NOT stored in logevents_.
  td::Result<td::int64> parse(td::Slice data) override {
    if (data.size() < 4) {
      return -4;
    }
    LogEvent res;
    TRY_RESULT(size, res.parse(data, &res));
    if (size > 0) {
      if (res.event_.get_offset() == res.event_.offset<LogEventCrc32C>()) {
        auto crc = res.event_.get<LogEventCrc32C>().crc32c;
        // The checkpoint covers everything strictly before this event.
        flush_crc();
        if (crc != crc_) {
          return Status::Error("Crc mismatch");
        }
      } else {
        logevents_.emplace_back(std::move(res));
      }
      // The checkpoint event's own bytes are part of the next checksum.
      lazy_crc_extend(data.substr(0, td::narrow_cast<std::size_t>(size)));
    }
    return size;
  }
  td::uint32 crc32c() {
    flush_crc();
    return crc_;
  }
  void flush() override {
    flush_crc();
  }

 private:
  std::vector<LogEvent> logevents_;
  td::uint32 crc_{0};
  // Bytes consumed but not yet folded into crc_; points into the caller's
  // buffer, so it must be flushed before that buffer goes away (flush()).
  td::Slice suffix_;
  void flush_crc() {
    crc_ = td::crc32c_extend(crc_, suffix_);
    suffix_ = Slice();
  }
  // Defer crc computation: when the new chunk is directly adjacent to
  // suffix_ in memory just widen suffix_, otherwise fold suffix_ in first.
  void lazy_crc_extend(Slice slice) {
    if (suffix_.empty()) {
      suffix_ = slice;
      return;
    }
    if (suffix_.end() == slice.begin()) {
      suffix_ = Slice(suffix_.begin(), slice.end());
      return;
    }
    flush_crc();
    suffix_ = slice;
  }
};
// Generates 1000 random logevents together with their serialized bytes,
// for round-trip testing.
class RandomBinlog {
 public:
  RandomBinlog() {
    size_t logevent_count = 1000;
    for (size_t i = 0; i < logevent_count; i++) {
      add_logevent(create_random_logevent());
    }
  }
  // Serialized form of all generated events, concatenated.
  Slice data() const {
    return data_;
  }
  td::Span<LogEvent> logevents() const {
    return logevents_;
  }

 private:
  std::vector<LogEvent> logevents_;
  std::string data_;
  // Serialize one event (a first call with an empty slice yields the negated
  // required size) and record both the event and its bytes.
  template <class T>
  void add_logevent(T event) {
    int64 size = -event.serialize({});
    std::string data(td::narrow_cast<std::size_t>(size), '\0');
    int64 new_size = event.serialize(data);
    CHECK(new_size == size);
    data_ += data;
    logevents_.emplace_back(std::move(event));
  }
  // Pick one of the five event types uniformly and fill it with random data.
  LogEvent create_random_logevent() {
    auto rand_uint256 = [] {
      td::UInt256 res;
      td::Random::secure_bytes(as_slice(res));
      return res;
    };
    auto rand_block_id = [] {
      BlockId res;
      res.workchain = td::Random::fast(0, 100);
      res.shard = td::Random::fast(0, 100);
      res.seqno = td::Random::fast(0, 100);
      return res;
    };
    auto type = td::Random::fast(0, 4);
    switch (type) {
      case 0: {
        auto size = td::Random::fast(0, 10);
        LogEventString event;
        event.data = td::rand_string('a', 'z', size);
        return event;
      }
      case 1: {
        return LogEventStart(rand_uint256(), 12);
      }
      case 2: {
        return LogEventSetZeroState(rand_uint256(), rand_uint256(), td::Random::fast(0, 1000),
                                    td::Random::fast(0, 1000));
      }
      case 3: {
        return LogEventNewBlock(rand_block_id(), rand_uint256(), rand_uint256(), 12, 17);
      }
      case 4: {
        return LogEventNewState(rand_block_id(), rand_uint256(), rand_uint256(), 12, 17);
      }
    }
    UNREACHABLE();
  }
};
// Round-trips `data` through every reader/writer combination (chunked helper
// parse, sync and async writers, sync and async replay) and checks that each
// path yields the same event sequence. When `events` is given, the parsed
// events must also match it.
void test_binlog(td::Slice data, td::optional<td::Span<LogEvent>> events = {}) {
  // Feed the binlog in random-sized chunks through BinlogReaderHelper.
  auto splitted_binlog = td::rand_split(data);
  std::string new_binlog_data;
  BinlogReader reader;
  td::BinlogReaderHelper reader_impl;
  for (auto& chunk : splitted_binlog) {
    reader_impl.parse(reader, chunk).ensure();
  }
  //Binlog write sync
  {
    td::Binlog::destroy(test_binlog_path);
    td::BinlogWriter binlog_writer(test_binlog_path.str());
    binlog_writer.open().ensure();
    BinlogReader new_reader;
    size_t i = 0;
    for (auto& logevent : reader.logevents()) {
      binlog_writer.write_event(logevent, &new_reader).ensure();
      i++;
      // Insert a crc checkpoint after every 10th event.
      if (i % 10 == 0) {
        binlog_writer.write_event(LogEvent(LogEventCrc32C(new_reader.crc32c())), &new_reader).ensure();
      }
    }
    binlog_writer.sync();
    binlog_writer.close().ensure();
    auto file_data = read_file(test_binlog_path).move_as_ok();
    ASSERT_TRUE(reader.logevents() == new_reader.logevents());
    // Later stages replay the rewritten binlog (now containing checkpoints).
    new_binlog_data = file_data.as_slice().str();
    data = new_binlog_data;
    //ASSERT_EQ(data, file_data);
  }
  //Binlog write async
  {
    td::Binlog::destroy(test_binlog_path);
    td::BinlogWriterAsync binlog_writer(test_binlog_path.str());
    td::actor::Scheduler scheduler({2});
    BinlogReader new_reader;
    scheduler.run_in_context([&]() mutable {
      binlog_writer.open().ensure();
      for (auto& logevent : reader.logevents()) {
        binlog_writer.write_event(logevent, &new_reader).ensure();
      }
      // sync -> close -> stop the scheduler, chained via callbacks.
      binlog_writer.sync([&](Result<td::Unit> res) {
        res.ensure();
        binlog_writer.close([&](Result<td::Unit> res) {
          res.ensure();
          td::actor::SchedulerContext::get()->stop();
        });
      });
    });
    scheduler.run();
    scheduler.stop();
    auto file_data = read_file(test_binlog_path).move_as_ok();
    ASSERT_TRUE(reader.logevents() == new_reader.logevents());
    //ASSERT_EQ(data, file_data);
  }
  ASSERT_TRUE(!events || events.value() == reader.logevents());
  // Re-serialize every parsed event (round-trip sanity for serialize()).
  std::string new_data;
  for (auto& event : reader.logevents()) {
    int64 size = -event.serialize({});
    std::string event_data(td::narrow_cast<std::size_t>(size), '\0');
    int64 new_size = event.serialize(event_data);
    CHECK(new_size == size);
    new_data += event_data;
  }
  //ASSERT_EQ(data, new_data);
  // Binlog::read_sync
  {
    td::CSlice path("test.binlog");
    td::Binlog::destroy(path);
    td::write_file(path, data).ensure();
    td::Binlog binlog(path.str());
    BinlogReader binlog_reader;
    binlog.replay_sync(binlog_reader).ensure();
    ASSERT_EQ(reader.logevents().size(), binlog_reader.logevents().size());
    ASSERT_TRUE(reader.logevents() == binlog_reader.logevents());
  }
  // Binlog::read_async
  {
    td::Binlog::destroy(test_binlog_path);
    td::write_file(test_binlog_path, data).ensure();
    td::Binlog binlog(test_binlog_path.str());
    auto binlog_reader = std::make_shared<BinlogReader>();
    td::actor::Scheduler scheduler({2});
    scheduler.run_in_context([&]() mutable {
      binlog.replay_async(binlog_reader, [](Result<td::Unit> res) {
        res.ensure();
        td::actor::SchedulerContext::get()->stop();
      });
    });
    scheduler.run();
    scheduler.stop();
    ASSERT_EQ(reader.logevents().size(), binlog_reader->logevents().size());
    ASSERT_TRUE(reader.logevents() == binlog_reader->logevents());
  }
}
// End-to-end round trip over a randomly generated binlog.
TEST(Binlog, Reader) {
  RandomBinlog binlog;
  test_binlog(binlog.data(), binlog.logevents());
}
// Fixed, hand-captured binlog snapshot (base64) — guards the wire format
// against accidental changes.
TEST(Binlog, Hands) {
  std::string binlog = td::base64_decode(
      "a0RCBAABKQCRMn1c2DaJhwrptxburpRtrWI2sjGhVbG29bFO0r8DDtAAExjZPKtjAAAAALwGAAAA"
      "AAAAFvJq3qfzFCDWap+LUrgBI8sWFayIOQSxkBjV3CWgizHYNomHCum3Fu6ulG2tYjayMaFVsbb1"
      "sU7SvwMO0AATGGO89BmAAAAA/////wEAAAAAAAAAAAAAgN4RAAAAAAAAa53L4ziGleZ7K+StAsBd"
      "txMxbHHfuB9SJRFp+BMzXfnGnt8TsgFnig7j/xVRjtIsYUVw0rQZJUC0sWQROj0SHvplIkBV9vMp")
          .move_as_ok();
  test_binlog(binlog);
}
// Basic CyclicBuffer contract: close propagation in both directions, then a
// deterministic fill/drain cycle with alignment checks.
TEST(Buffers, CyclicBufferSimple) {
  // Reader closes first; writer must observe it with the reader's status.
  {
    auto reader_writer = td::CyclicBuffer::create();
    auto reader = std::move(reader_writer.first);
    auto writer = std::move(reader_writer.second);
    ASSERT_TRUE(!writer.is_reader_closed());
    reader.close_reader(td::Status::Error(2));
    ASSERT_TRUE(!reader.is_writer_closed());
    ASSERT_TRUE(writer.is_reader_closed());
    ASSERT_EQ(2, writer.reader_status().code());
  }
  // Writer closes first; reader must observe it with the writer's status.
  {
    auto reader_writer = td::CyclicBuffer::create();
    auto reader = std::move(reader_writer.first);
    auto writer = std::move(reader_writer.second);
    ASSERT_TRUE(!reader.is_writer_closed());
    writer.close_writer(td::Status::Error(2));
    ASSERT_TRUE(!writer.is_reader_closed());
    ASSERT_TRUE(reader.is_writer_closed());
    ASSERT_EQ(2, reader.writer_status().code());
  }
  // Fill chunk by chunk (checking sizes and chunk alignment), then drain
  // interleaved with the remaining writes until both sides are exhausted.
  {
    td::CyclicBuffer::Options options;
    options.chunk_size = 14;
    options.count = 10;
    options.alignment = 7;
    auto reader_writer = td::CyclicBuffer::create(options);
    auto reader = std::move(reader_writer.first);
    auto writer = std::move(reader_writer.second);
    auto data = td::rand_string('a', 'z', 100001);
    td::Slice write_slice = data;
    td::Slice read_slice = data;
    for (size_t i = 1; (int)i < options.count; i++) {
      ASSERT_EQ((i - 1) * options.chunk_size, reader.reader_size());
      ASSERT_EQ((i - 1) * options.chunk_size, writer.writer_size());
      auto slice = writer.prepare_write();
      // Every chunk handed out by the buffer must honor options.alignment.
      ASSERT_EQ(0u, reinterpret_cast<td::uint64>(slice.data()) % options.alignment);
      auto to_copy = write_slice;
      to_copy.truncate(options.chunk_size);
      slice.copy_from(to_copy);
      write_slice = write_slice.substr(to_copy.size());
      writer.confirm_write(to_copy.size());
      ASSERT_EQ(i * options.chunk_size, reader.reader_size());
      ASSERT_EQ(i * options.chunk_size, writer.writer_size());
    }
    bool is_writer_closed = false;
    while (true) {
      {
        // Check is_writer_closed BEFORE reading to avoid losing a final chunk
        // written between the read and the check.
        bool is_closed = reader.is_writer_closed();
        auto slice = reader.prepare_read();
        ASSERT_EQ(read_slice.substr(0, slice.size()), slice);
        read_slice = read_slice.substr(slice.size());
        reader.confirm_read(slice.size());
        if (is_closed && slice.empty()) {
          break;
        }
      }
      if (!is_writer_closed) {
        auto slice = writer.prepare_write();
        auto to_copy = write_slice;
        to_copy.truncate(options.chunk_size);
        if (to_copy.empty()) {
          writer.close_writer(td::Status::OK());
          is_writer_closed = true;
        } else {
          slice.copy_from(to_copy);
          write_slice = write_slice.substr(to_copy.size());
          writer.confirm_write(to_copy.size());
        }
      }
    }
    ASSERT_EQ(0u, write_slice.size());
    ASSERT_EQ(0u, read_slice.size());
  }
}
// Randomized CyclicBuffer stress: reads and writes interleaved by coin flips;
// the drained stream must equal the source data.
TEST(Buffers, CyclicBuffer) {
  for (int t = 0; t < 20; t++) {
    td::CyclicBuffer::Options options;
    options.chunk_size = 14;
    options.count = 10;
    options.alignment = 7;
    auto reader_writer = td::CyclicBuffer::create(options);
    auto reader = std::move(reader_writer.first);
    auto writer = std::move(reader_writer.second);
    auto data = td::rand_string('a', 'z', 100001);
    auto chunks = td::rand_split(data);
    size_t chunk_i = 0;
    std::string res;
    while (true) {
      if (td::Random::fast(0, 1) == 0) {
        // Sample is_writer_closed BEFORE reading so the final chunk is not
        // missed (close may race the read otherwise).
        bool is_closed = reader.is_writer_closed();
        auto slice = reader.prepare_read();
        res += slice.str();
        reader.confirm_read(slice.size());
        if (slice.empty() && is_closed) {
          reader.writer_status().ensure();
          break;
        }
      }
      if (chunk_i < chunks.size() && td::Random::fast(0, 1) == 0) {
        // Write as much of the current chunk as fits; keep the remainder.
        auto slice = writer.prepare_write();
        auto from = Slice(chunks[chunk_i]);
        auto copy = from.substr(0, slice.size());
        slice.copy_from(copy);
        writer.confirm_write(copy.size());
        auto left = from.substr(copy.size());
        if (!left.empty()) {
          chunks[chunk_i] = left.str();
        } else {
          chunk_i++;
          if (chunk_i == chunks.size()) {
            writer.close_writer(td::Status::OK());
          }
        }
      }
    }
    ASSERT_EQ(data, res);
  }
}
// Randomized ChainBuffer stress: append/read interleaved by coin flips; the
// drained stream must equal the source data.
TEST(Buffers, ChainBuffer) {
  for (int t = 0; t < 20; t++) {
    td::ChainBuffer::Options options;
    options.chunk_size = 14;
    auto reader_writer = td::ChainBuffer::create(options);
    auto reader = std::move(reader_writer.first);
    auto writer = std::move(reader_writer.second);
    auto data = td::rand_string('a', 'z', 100001);
    auto chunks = td::rand_split(data);
    size_t chunk_i = 0;
    std::string res;
    while (true) {
      if (td::Random::fast(0, 1) == 0) {
        // Sample is_writer_closed BEFORE reading so the final chunk is not
        // missed (close may race the read otherwise).
        bool is_closed = reader.is_writer_closed();
        Slice slice;
        if (reader.reader_size() != 0) {
          slice = reader.prepare_read();
          res += slice.str();
          reader.confirm_read(slice.size());
        }
        if (slice.empty() && is_closed) {
          reader.writer_status().ensure();
          break;
        }
      }
      if (chunk_i < chunks.size() && td::Random::fast(0, 1) == 0) {
        writer.append(chunks[chunk_i]);
        chunk_i++;
        if (chunk_i == chunks.size()) {
          writer.close_writer(td::Status::OK());
        }
      }
    }
    ASSERT_EQ(data.size(), res.size());
    ASSERT_EQ(data, res);
  }
}

685
tddb/test/io-bench.cpp Normal file
View file

@ -0,0 +1,685 @@
/*
This file is part of TON Blockchain source code.
TON Blockchain is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
TON Blockchain is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with TON Blockchain. If not, see <http://www.gnu.org/licenses/>.
In addition, as a special exception, the copyright holders give permission
to link the code of portions of this program with the OpenSSL library.
You must obey the GNU General Public License in all respects for all
of the code used other than OpenSSL. If you modify file(s) with this
exception, you may extend this exception to your version of the file(s),
but you are not obligated to do so. If you do not wish to do so, delete this
exception statement from your version. If you delete this exception statement
from all source files in the program, then also delete it here.
Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/utils/OptionsParser.h"
#include "td/utils/filesystem.h"
#include "td/utils/port/FileFd.h"
#include "td/utils/Timer.h"
#include "td/utils/crypto.h"
#include "td/utils/BufferedReader.h"
#include "td/utils/optional.h"
#include "td/actor/actor.h"
#include "td/db/utils/StreamInterface.h"
#include "td/db/utils/ChainBuffer.h"
#include "td/db/utils/CyclicBuffer.h"
#include "td/db/utils/FileSyncState.h"
#include "td/db/utils/StreamToFileActor.h"
#include "td/db/utils/FileToStreamActor.h"
#include <cmath>
namespace td {
// Actor that drains a CyclicBuffer::Reader, handing each readable chunk to a
// Callback. Reports end-of-stream (or a processing error) via on_closed and
// then stops itself.
class AsyncCyclicBufferReader : public td::actor::Actor {
 public:
  class Callback {
   public:
    virtual ~Callback() {
    }
    // Invoked when the reader wants the producer to be woken up.
    virtual void want_more() = 0;
    // Invoked for every non-empty chunk; returning an error closes the reader.
    virtual Status process(Slice data) = 0;
    // Invoked exactly once, with the writer's status or the process() error.
    virtual void on_closed(Status status) = 0;
  };
  AsyncCyclicBufferReader(CyclicBuffer::Reader reader, td::unique_ptr<Callback> callback)
      : reader_(std::move(reader)), callback_(std::move(callback)) {
  }

 private:
  CyclicBuffer::Reader reader_;
  td::unique_ptr<Callback> callback_;
  void loop() override {
    while (true) {
      auto data = reader_.prepare_read();
      if (data.empty()) {
        if (reader_.is_writer_closed()) {
          callback_->on_closed(std::move(reader_.writer_status()));
          return stop();
        }
        callback_->want_more();
        return;
      }
      auto status = callback_->process(data);
      if (status.is_error()) {
        callback_->on_closed(std::move(status));
        // Bug fix: previously execution fell through after reporting the
        // error, so the loop kept reading and could invoke process()/
        // on_closed() again after the stream was already reported closed.
        return stop();
      }
      reader_.confirm_read(data.size());
      //TODO: better condition for want_more. May be reader should decide if it is ready for more writes
      callback_->want_more();
    }
  }
};
} // namespace td
class Processor {
public:
void process(td::Slice slice) {
res = crc32c_extend(res, slice);
res2 = crc32c_extend(res2, slice);
}
auto result() {
return res * res2;
}
private:
td::uint32 res{0};
td::uint32 res2{0};
};
// Baseline: slurp the whole file into memory in one call, then checksum it.
void read_baseline(td::CSlice path) {
  LOG(ERROR) << "BASELINE";
  td::PerfWarningTimer read_timer("read file");
  auto content = td::read_file(path).move_as_ok();
  read_timer.reset();
  td::PerfWarningTimer process_timer("process file", 0);
  Processor processor;
  processor.process(content.as_slice());
  process_timer.reset();
  LOG(ERROR) << processor.result();
}
// Read the file through td::BufferedReader in buffer_size-byte chunks.
void read_buffered(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "BufferedReader";
  auto fd = td::FileFd::open(path, td::FileFd::Read).move_as_ok();
  td::BufferedReader reader(fd, buffer_size);
  std::vector<char> chunk(buffer_size);
  Processor processor;
  for (;;) {
    auto dest = td::MutableSlice(chunk.data(), chunk.size());
    auto got = reader.read(dest).move_as_ok();
    if (got == 0) {
      break;  // end of file
    }
    processor.process(dest.truncate(got));
  }
  LOG(ERROR) << processor.result();
}
// Read the file asynchronously: a FileToStreamActor pumps the file into a
// CyclicBuffer, an AsyncCyclicBufferReader drains it into a Processor, and
// the two actors wake each other up through callbacks.
void read_async(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Async";
  auto fd = td::FileFd::open(path, td::FileFd::Read).move_as_ok();
  td::actor::Scheduler scheduler({2});
  scheduler.run_in_context([&] {
    auto reader_writer = td::CyclicBuffer::create();
    //TODO: hide actor
    auto reader =
        td::actor::create_actor<td::FileToStreamActor>("Reader", std::move(fd), std::move(reader_writer.second));
    // Consumer side: checksums each chunk; stops the scheduler when the
    // stream closes.
    class Callback : public td::AsyncCyclicBufferReader::Callback {
     public:
      Callback(td::actor::ActorOwn<> reader) : reader_(std::move(reader)) {
      }
      void want_more() override {
        td::actor::send_signals_later(reader_, td::actor::ActorSignals::wakeup());
      }
      td::Status process(td::Slice data) override {
        processor.process(data);
        return td::Status::OK();
      }
      void on_closed(td::Status status) override {
        LOG(ERROR) << processor.result();
        td::actor::SchedulerContext::get()->stop();
      }

     private:
      td::actor::ActorOwn<> reader_;
      Processor processor;
    };
    auto reader_copy = reader.get();
    auto callback = td::make_unique<Callback>(std::move(reader));
    auto processor = td::actor::create_actor<td::AsyncCyclicBufferReader>(
        "BufferReader", std::move(reader_writer.first), std::move(callback));
    // Producer side: wake the consumer whenever new data is available.
    class ReaderCallback : public td::FileToStreamActor::Callback {
     public:
      ReaderCallback(td::actor::ActorId<> actor) : actor_(std::move(actor)) {
      }
      void got_more() override {
        td::actor::send_signals_later(actor_, td::actor::ActorSignals::wakeup());
      }

     private:
      td::actor::ActorId<> actor_;
    };
    send_closure(reader_copy, &td::FileToStreamActor::set_callback,
                 td::make_unique<ReaderCallback>(processor.release()));
  });
  scheduler.run();
}
// Backing storage for O_DIRECT reads; an aligned pointer is carved from it.
static char o_direct_buf[100000000];
// Read via O_DIRECT: a 4096-aligned pointer into o_direct_buf serves as the
// destination, to satisfy direct-I/O alignment requirements.
void read_o_direct(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Direct";
  auto fd = td::FileFd::open(path, td::FileFd::Read | td::FileFd::Direct).move_as_ok();
  size_t align = 4096;
  // Round o_direct_buf up to the next multiple of align.
  auto *ptr =
      reinterpret_cast<char *>((reinterpret_cast<std::uintptr_t>(o_direct_buf) + align - 1) & td::bits_negate64(align));
  // NOTE(review): BufferedReader copies through its own internal buffer whose
  // alignment is not guaranteed here — confirm this path still honors the
  // O_DIRECT alignment contract.
  td::BufferedReader reader(fd, buffer_size);
  Processor processor;
  while (true) {
    auto slice = td::MutableSlice(ptr, buffer_size);
    auto size = reader.read(slice).move_as_ok();
    if (size == 0) {
      break;
    }
    processor.process(slice.truncate(size));
  }
  LOG(ERROR) << processor.result();
}
// Produces roughly 600 MiB of data, chunk by chunk, for the write benchmarks.
class DataGenerator {
 public:
  // True while more data remains to be generated.
  operator bool() const {
    return generated_size < total_size;
  }
  td::string next() {
    // NOTE(review): always returns words_[2] (the 20-byte chunk); the other
    // entries are dead. Possibly meant to cycle through words_ — confirm.
    auto res = words_[2];
    generated_size += res.size();
    return res;
  }

 private:
  std::vector<std::string> words_{"a", "fjdksalfdfs", std::string(20, 'b'), std::string(1000, 'a')};
  size_t total_size = (1 << 20) * 600;
  size_t generated_size = 0;
};
// Baseline: write each generated chunk straight to the file, then sync.
void write_baseline(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Baseline";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  // Bug fix: a std::vector<char> buf(buffer_size) was allocated here but
  // never used. The parameter is kept for signature parity with the other
  // write_* benchmarks; the baseline intentionally does no buffering.
  (void)buffer_size;
  DataGenerator generator;
  while (generator) {
    auto slice = generator.next();
    fd.write(slice).ensure();
  }
  fd.sync().ensure();
}
// Coalesce generated chunks into one buffer_size-byte buffer, flushing with a
// single write() whenever the next chunk would not fit.
void write_buffered(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "Buffered";
  auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
                .move_as_ok();
  std::vector<char> buf(buffer_size);
  size_t data_size{0};  // bytes currently pending in buf
  // Write out everything buffered so far.
  auto flush = [&]() {
    auto slice = td::Slice(buf.data(), data_size);
    fd.write(slice).ensure();
    //auto io_slice = as_io_slice(slice);
    //fd.writev({&io_slice, 1}).ensure();
    data_size = 0;
  };
  auto append = [&](td::Slice slice) {
    if (data_size + slice.size() > buffer_size) {
      flush();
    }
    // Bug fix: a slice larger than the whole buffer used to be copied into
    // buf anyway, overflowing it. Write such slices straight through instead.
    if (slice.size() > buffer_size) {
      fd.write(slice).ensure();
      return;
    }
    td::MutableSlice(buf.data(), buffer_size).substr(data_size).copy_from(slice);
    data_size += slice.size();
  };
  DataGenerator generator;
  while (generator) {
    auto slice = generator.next();
    append(slice);
  }
  flush();
  fd.sync().ensure();
}
namespace td {
class FileWriter {
public:
FileWriter(FileFd fd, size_t buffer_size) : fd_(std::move(fd)), raw_buffer_(buffer_size) {
reset();
buffer_slices_.reserve(1024);
strings_.reserve(1024);
ios_slices_.reserve(1024);
}
void append(std::string data) {
cached_size_ += data.size();
if (data.size() <= max_copy_size) {
append_copy(data);
} else {
CHECK(strings_.size() < strings_.capacity());
strings_.push_back(std::move(data));
ios_slices_.push_back(as_io_slice(strings_.back()));
should_merge_ = false;
}
try_flush();
}
void append(BufferSlice data) {
cached_size_ += data.size();
if (data.size() <= max_copy_size) {
append_copy(data);
} else {
buffer_slices_.push_back(std::move(data));
ios_slices_.push_back(as_io_slice(strings_.back()));
should_merge_ = false;
}
try_flush();
}
void append(Slice data) {
if (data.size() <= max_copy_size) {
append_copy(data);
try_flush();
} else if (data.size() > min_immediate_write_size) {
ios_slices_.push_back(as_io_slice(data));
flush();
} else {
append(BufferSlice(data));
}
}
void flush() {
if (ios_slices_.empty()) {
return;
}
flushed_size_ += cached_size_;
fd_.writev(ios_slices_).ensure();
reset();
}
void sync() {
flush();
synced_size_ = flushed_size_;
fd_.sync().ensure();
}
bool may_flush() const {
return cached_size_ != 0;
}
size_t total_size() const {
return flushed_size() + cached_size_;
}
size_t flushed_size() const {
return flushed_size_;
}
size_t synced_size() const {
return synced_size_;
}
private:
static constexpr size_t max_cached_size = 256 * (1 << 10);
static constexpr size_t min_immediate_write_size = 32 * (1 << 10);
FileFd fd_;
std::vector<char> raw_buffer_;
size_t max_copy_size = min(raw_buffer_.size() / 8, size_t(4096u));
MutableSlice buffer_;
bool should_merge_ = false;
std::vector<BufferSlice> buffer_slices_;
std::vector<std::string> strings_;
std::vector<IoSlice> ios_slices_;
size_t cached_size_{0};
size_t flushed_size_{0};
size_t synced_size_{0};
void append_copy(Slice data) {
buffer_.copy_from(data);
if (should_merge_) {
auto back = as_slice(ios_slices_.back());
back = Slice(back.data(), back.size() + data.size());
ios_slices_.back() = as_io_slice(back);
} else {
ios_slices_.push_back(as_io_slice(buffer_.substr(0, data.size())));
should_merge_ = true;
}
buffer_ = buffer_.substr(data.size());
}
void reset() {
buffer_ = MutableSlice(raw_buffer_.data(), raw_buffer_.size());
buffer_slices_.clear();
strings_.clear();
ios_slices_.clear();
should_merge_ = false;
cached_size_ = 0;
}
bool must_flush() const {
return buffer_.size() < max_copy_size || ios_slices_.size() == ios_slices_.capacity() ||
cached_size_ >= max_cached_size;
}
void try_flush() {
if (!must_flush()) {
return;
}
flush();
}
};
// Sketch of an actor that drains a ChainBufferReader into a file via writev.
// NOTE(review): fd_ and reader_ are default-constructed and never wired up in
// this translation unit — presumably unfinished bench scaffolding; confirm
// before relying on it.
class AsyncFileWriterActor : public actor::Actor {
 public:
  AsyncFileWriterActor(FileSyncState::Reader state) : state_(std::move(state)) {
    io_slices_.reserve(100);
  }

 private:
  FileFd fd_;
  ChainBufferReader reader_;
  FileSyncState::Reader state_;
  std::vector<IoSlice> io_slices_;
  size_t flushed_size_{0};
  size_t synced_size_{0};
  // Write out everything currently readable, batching up to capacity()
  // iovecs per writev() call.
  void flush() {
    reader_.sync_with_writer();
    while (!reader_.empty()) {
      auto it = reader_.clone();
      size_t io_slices_size = 0;
      while (!it.empty() && io_slices_.size() < io_slices_.capacity()) {
        auto slice = it.prepare_read();
        io_slices_.push_back(as_io_slice(slice));
        io_slices_size += slice.size();
        it.confirm_read(slice.size());
      }
      if (!io_slices_.empty()) {
        auto r_written = fd_.writev(io_slices_);
        LOG_IF(FATAL, r_written.is_error()) << r_written.error();
        auto written = r_written.move_as_ok();
        CHECK(written == io_slices_size);
        flushed_size_ += written;
        io_slices_.clear();
      }
      // Advance the main reader past everything just written.
      reader_ = std::move(it);
    }
  }
  void loop() override {
    reader_.sync_with_writer();
    flush();
  }
};
} // namespace td
// Write benchmark using td::FileWriter (iovec-based buffered writer): streams
// everything DataGenerator produces into `path`, then syncs the file.
void write_vector(td::CSlice path, size_t buffer_size) {
  LOG(ERROR) << "io vector";
  auto flags = td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write;
  auto file = td::FileFd::open(path, flags).move_as_ok();
  td::FileWriter file_writer(std::move(file), buffer_size);
  DataGenerator data_source;
  while (data_source) {
    file_writer.append(data_source.next());
  }
  // Make the written data durable before returning.
  file_writer.sync();
}
// Write benchmark via td::StreamToFileActor: a producer actor fills a
// ChainBuffer and throttles itself against the flushed-size feedback that the
// file actor publishes through FileSyncState.
void write_async(td::CSlice path, size_t buffer_size) {
LOG(ERROR) << "Async";
auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
.move_as_ok();
td::actor::Scheduler scheduler({1});
scheduler.run_in_context([&] {
class Writer : public td::actor::Actor {
public:
Writer(td::FileFd fd, size_t buffer_size) : fd_(std::move(fd)), buffer_size_(buffer_size) {
}
// Wakes the parent Writer whenever the file actor's sync state advances.
class Callback : public td::StreamToFileActor::Callback {
public:
Callback(td::actor::ActorShared<> parent) : parent_(std::move(parent)) {
}
void on_sync_state_changed() override {
td::actor::send_signals_later(parent_, td::actor::ActorSignals::wakeup());
}
private:
td::actor::ActorShared<> parent_;
};
void start_up() override {
// Wire up the shared buffer (writer end stays here, reader end goes to
// the file actor) and the shared sync state, then start the file actor.
auto buffer_reader_writer = td::ChainBuffer::create();
buffer_writer_ = std::move(buffer_reader_writer.second);
auto buffer_reader = std::move(buffer_reader_writer.first);
auto sync_state_reader_writer = td::FileSyncState::create();
fd_sync_state_ = std::move(sync_state_reader_writer.first);
auto sync_state_writer = std::move(sync_state_reader_writer.second);
auto options = td::StreamToFileActor::Options{};
writer_ = td::actor::create_actor<td::StreamToFileActor>(td::actor::ActorOptions().with_name("FileWriterActor"),
std::move(buffer_reader), std::move(fd_),
std::move(sync_state_writer), options);
send_closure(writer_, &td::StreamToFileActor::set_callback, td::make_unique<Callback>(actor_shared(this)));
loop();
}
private:
td::FileFd fd_;
td::optional<td::ChainBuffer::Writer> buffer_writer_;
td::optional<td::FileSyncState::Reader> fd_sync_state_;
td::actor::ActorOwn<td::StreamToFileActor> writer_;
size_t buffer_size_;
DataGenerator generator_;
size_t total_size_{0};
bool was_sync_{false};
void loop() override {
// Produce data, staying at most 10 buffer_size_'s ahead of the flush.
auto flushed_size = fd_sync_state_.value().flushed_size();
while (generator_ && total_size_ < flushed_size + buffer_size_ * 10) {
auto str = generator_.next();
total_size_ += str.size();
buffer_writer_.value().append(str);
}
td::actor::send_signals_later(writer_, td::actor::ActorSignals::wakeup());
if (generator_) {
return;
} else if (!was_sync_) {
// All data produced: request a sync of everything, exactly once.
was_sync_ = true;
fd_sync_state_.value().set_requested_sync_size(total_size_);
td::actor::send_signals_later(writer_, td::actor::ActorSignals::wakeup());
}
if (fd_sync_state_.value().synced_size() == total_size_) {
// Everything is durable; dropping writer_ triggers hangup_shared.
writer_.reset();
}
}
void hangup_shared() override {
td::actor::SchedulerContext::get()->stop();
stop();
}
};
td::actor::create_actor<Writer>("Writer", std::move(fd), buffer_size).release();
});
scheduler.run();
}
// Write benchmark with a hand-rolled actor pair: Writer synchronously fills a
// ChainBuffer; Worker drains it to the file and fsyncs when Writer hangs up.
// NOTE(review): buffer_size is accepted but never used — the append chunk
// size is hard-coded to 65536 below; confirm whether that is intended.
void write_async2(td::CSlice path, size_t buffer_size) {
LOG(ERROR) << "Async2";
auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
.move_as_ok();
td::actor::Scheduler scheduler({1});
scheduler.run_in_context([&] {
// Consumer: owns the reader end of the ChainBuffer and the file.
class Worker : public td::actor::Actor {
public:
Worker(td::FileFd fd, td::ChainBufferReader reader, td::actor::ActorShared<> parent)
: fd_(std::move(fd)), reader_(std::move(reader)), parent_(std::move(parent)) {
}
private:
td::FileFd fd_;
td::ChainBufferReader reader_;
td::actor::ActorShared<> parent_;
void loop() override {
// Pull in everything appended so far, then write it all out.
reader_.sync_with_writer();
while (!reader_.empty()) {
auto slice = reader_.prepare_read();
fd_.write(slice).ensure();
reader_.confirm_read(slice.size());
}
}
void hangup() override {
// Producer is done: drain the remainder, make it durable, then exit.
loop();
fd_.sync().ensure();
stop();
}
};
// Producer: generates the whole dataset from start_up().
class Writer : public td::actor::Actor {
public:
Writer(td::FileFd fd) : fd_(std::move(fd)) {
}
private:
td::FileFd fd_;
td::actor::ActorOwn<> worker_;
td::ChainBufferWriter writer_;
DataGenerator generator_;
void start_up() override {
worker_ =
td::actor::create_actor<Worker>("Worker", std::move(fd_), writer_.extract_reader(), actor_shared(this));
while (generator_) {
writer_.append(generator_.next(), 65536);
send_signals_later(worker_, td::actor::ActorSignals::wakeup());
}
// Dropping the handle delivers hangup() to the worker.
worker_.reset();
}
void hangup_shared() override {
td::actor::SchedulerContext::get()->stop();
stop();
}
};
td::actor::create_actor<Writer>(td::actor::ActorOptions().with_name("Writer").with_poll(), std::move(fd)).release();
});
scheduler.run();
}
// Entry point of the I/O benchmark driver: parses options and dispatches to
// the selected read/write test with the chosen I/O strategy.
//   -f  file path to read from / write to
//   -t  test type: 0 - read, 1 - write (default: write)
//   -m  I/O mode: 0..5 (baseline/buffered/direct/async/writev/async2)
//   -b  buffer size in bytes (default: 1024)
int main(int argc, char **argv) {
  std::string from;
  enum Type { Read, Write };
  Type type{Write};
  enum Mode { Baseline, Buffered, Direct, Async, WriteV, Async2 };
  Mode mode = Baseline;
  size_t buffer_size = 1024;
  td::OptionsParser options_parser;
  options_parser.add_option('f', td::Slice("from"), td::Slice("read from file"), [&](td::Slice arg) -> td::Status {
    from = arg.str();
    return td::Status::OK();
  });
  // Previously `type` could never be set, so the whole Read branch below was
  // unreachable; expose it via -t while keeping Write as the default.
  options_parser.add_option('t', td::Slice("type"), td::Slice("0 - read, 1 - write"),
                            [&](td::Slice arg) -> td::Status {
                              TRY_RESULT(x, td::to_integer_safe<int>(arg));
                              switch (x) {
                                case 0:
                                  type = Read;
                                  return td::Status::OK();
                                case 1:
                                  type = Write;
                                  return td::Status::OK();
                              }
                              return td::Status::Error("unknown type");
                            });
  options_parser.add_option('m', td::Slice("mode"), td::Slice("mode"), [&](td::Slice arg) -> td::Status {
    TRY_RESULT(x, td::to_integer_safe<int>(arg));
    switch (x) {
      case 0:
        mode = Baseline;
        return td::Status::OK();
      case 1:
        mode = Buffered;
        return td::Status::OK();
      case 2:
        mode = Direct;
        return td::Status::OK();
      case 3:
        mode = Async;
        return td::Status::OK();
      case 4:
        mode = WriteV;
        return td::Status::OK();
      case 5:
        mode = Async2;
        return td::Status::OK();
    }
    return td::Status::Error("unknown mode");
  });
  options_parser.add_option('b', td::Slice("buffer"), td::Slice("buffer size"), [&](td::Slice arg) -> td::Status {
    TRY_RESULT(x, td::to_integer_safe<size_t>(arg));
    buffer_size = x;
    return td::Status::OK();
  });
  auto status = options_parser.run(argc, argv);
  if (status.is_error()) {
    LOG(ERROR) << status.error() << "\n" << options_parser;
    // Report the usage error to the shell (was `return 0`, hiding failures).
    return 1;
  }
  switch (type) {
    case Read:
      switch (mode) {
        case Baseline:
          read_baseline(from);
          break;
        case Buffered:
          read_buffered(from, buffer_size);
          break;
        case Direct:
          read_o_direct(from, buffer_size);
          break;
        case Async:
          read_async(from, buffer_size);
          break;
        case Async2:
        case WriteV:
          LOG(FATAL) << "Not supported mode for Read test";
      }
      break;
    case Write:
      switch (mode) {
        case Baseline:
          write_baseline(from, buffer_size);
          break;
        case Buffered:
          write_buffered(from, buffer_size);
          break;
        case WriteV:
          write_vector(from, buffer_size);
          break;
        case Async:
          write_async(from, buffer_size);
          break;
        case Async2:
          write_async2(from, buffer_size);
          break;
        case Direct:
          LOG(FATAL) << "Unimplemented";
      }
  }
  return 0;
}

245
tddb/test/key_value.cpp Normal file
View file

@ -0,0 +1,245 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2019 Telegram Systems LLP
*/
#include "td/utils/tests.h"
#include "td/db/KeyValueAsync.h"
#include "td/db/KeyValue.h"
#include "td/db/RocksDb.h"
#include "td/utils/benchmark.h"
#include "td/utils/buffer.h"
#include "td/utils/optional.h"
#include "td/utils/UInt.h"
// Synchronous KeyValue smoke test: set/get round-trips, a missing key, keys
// and values containing embedded zero bytes, and persistence across a
// close/reopen cycle. (Also drops the stray ';' after the test body.)
TEST(KeyValue, simple) {
  td::Slice db_name = "testdb";
  td::RocksDb::destroy(db_name).ignore();
  std::unique_ptr<td::KeyValue> kv = std::make_unique<td::RocksDb>(td::RocksDb::open(db_name.str()).move_as_ok());
  auto set_value = [&](td::Slice key, td::Slice value) { kv->set(key, value); };
  // Asserts that `key` maps to exactly `value`.
  auto ensure_value = [&](td::Slice key, td::Slice value) {
    std::string kv_value;
    auto status = kv->get(key, kv_value).move_as_ok();
    ASSERT_EQ(td::int32(status), td::int32(td::KeyValue::GetStatus::Ok));
    ASSERT_EQ(kv_value, value);
  };
  // Asserts that `key` is absent.
  auto ensure_no_value = [&](td::Slice key) {
    std::string kv_value;
    auto status = kv->get(key, kv_value).move_as_ok();
    ASSERT_EQ(td::int32(status), td::int32(td::KeyValue::GetStatus::NotFound));
  };
  ensure_no_value("A");
  set_value("A", "HELLO");
  ensure_value("A", "HELLO");
  // Keys/values are arbitrary byte strings, including an embedded '\0'.
  td::UInt128 x;
  std::fill(as_slice(x).begin(), as_slice(x).end(), '1');
  x.raw[5] = 0;
  set_value(as_slice(x), as_slice(x));
  ensure_value(as_slice(x), as_slice(x));
  // Reopen the database and verify the data survived.
  kv.reset();
  kv = std::make_unique<td::RocksDb>(td::RocksDb::open(db_name.str()).move_as_ok());
  ensure_value("A", "HELLO");
  ensure_value(as_slice(x), as_slice(x));
}
// Asynchronous KeyValueAsync smoke test: a worker actor issues 10000 random
// 1 KiB writes in batches of 10 and waits for every completion callback
// before stopping the scheduler. (Also drops the stray ';' after the body.)
TEST(KeyValue, async_simple) {
  td::Slice db_name = "testdb";
  td::RocksDb::destroy(db_name).ignore();
  td::actor::Scheduler scheduler({6});
  // Stops the scheduler once the last holder (the worker) releases it.
  auto watcher = td::create_shared_destructor([] { td::actor::SchedulerContext::get()->stop(); });
  class Worker : public td::actor::Actor {
   public:
    Worker(std::shared_ptr<td::Destructor> watcher, std::string db_name)
        : watcher_(std::move(watcher)), db_name_(std::move(db_name)) {
    }
    void start_up() override {
      loop();
    }
    void tear_down() override {
    }
    void loop() override {
      // Open the database lazily on the first loop iteration.
      if (!kv_) {
        kv_ = td::KeyValueAsync<td::UInt128, td::BufferSlice>(
            std::make_unique<td::RocksDb>(td::RocksDb::open(db_name_).move_as_ok()));
        set_start_at_ = td::Timestamp::now();
      }
      if (next_set_ && next_set_.is_in_past()) {
        // Issue up to 10 writes per tick to keep the pipeline full.
        for (size_t i = 0; i < 10 && left_cnt_ > 0; i++, left_cnt_--) {
          do_set();
        }
        if (left_cnt_ > 0) {
          next_set_ = td::Timestamp::in(0.001);
          alarm_timestamp() = next_set_;
        } else {
          next_set_ = td::Timestamp::never();
          set_finish_at_ = td::Timestamp::now();
        }
      }
    }

   private:
    std::shared_ptr<td::Destructor> watcher_;
    td::optional<td::KeyValueAsync<td::UInt128, td::BufferSlice>> kv_;
    std::string db_name_;
    int left_cnt_ = 10000;         // writes still to be submitted
    int pending_cnt_ = left_cnt_;  // writes whose callback has not fired yet
    td::Timestamp next_set_ = td::Timestamp::now();
    td::Timestamp set_start_at_;
    td::Timestamp set_finish_at_;
    // Stores one random key/value pair; completion is counted in on_stored().
    void do_set() {
      td::UInt128 key;
      td::Random::secure_bytes(as_slice(key));
      td::BufferSlice data(1024);
      td::Random::secure_bytes(as_slice(data));
      kv_.value().set(key, std::move(data), [actor_id = actor_id(this)](td::Result<td::Unit> res) {
        res.ensure();
        send_closure(actor_id, &Worker::on_stored);
      });
    }
    void on_stored() {
      pending_cnt_--;
      if (pending_cnt_ == 0) {
        // Log drain time (after last submit) and total submission time.
        auto now = td::Timestamp::now();
        LOG(ERROR) << (now.at() - set_finish_at_.at());
        LOG(ERROR) << (set_finish_at_.at() - set_start_at_.at());
        stop();
      }
    }
  };
  scheduler.run_in_context([watcher = std::move(watcher), &db_name]() mutable {
    td::actor::create_actor<Worker>("Worker", watcher, db_name.str()).release();
    watcher.reset();
  });
  scheduler.run();
}
// Benchmarks one full RocksDb transaction (begin/set/commit) per iteration.
class KeyValueBenchmark : public td::Benchmark {
 public:
  std::string get_description() const override {
    // Fixed typo: "transation" -> "transaction".
    return "kv transaction benchmark";
  }
  void start_up() override {
    // Start from a clean database; ignore the error if it does not exist yet
    // (consistent with the destroy().ignore() calls in the tests above).
    td::RocksDb::destroy("ttt").ignore();
    db_ = td::RocksDb::open("ttt").move_as_ok();
  }
  void tear_down() override {
    db_ = {};
  }
  void run(int n) override {
    for (int i = 0; i < n; i++) {
      db_.value().begin_transaction();
      db_.value().set(PSLICE() << i, PSLICE() << i);
      db_.value().commit_transaction();
    }
  }

 private:
  td::optional<td::RocksDb> db_;
};
// Runs KeyValueBenchmark through the standard td benchmark harness.
TEST(KeyValue, Bench) {
td::bench(KeyValueBenchmark());
}
// Stress test for KeyValueAsync. Deliberately disabled: the early `return;`
// below skips the whole body, which would otherwise submit 10^9 writes.
TEST(KeyValue, Stress) {
return;
td::Slice db_name = "testdb";
td::RocksDb::destroy(db_name).ignore();
td::actor::Scheduler scheduler({6});
auto watcher = td::create_shared_destructor([] { td::actor::SchedulerContext::get()->stop(); });
class Worker : public td::actor::Actor {
public:
Worker(std::shared_ptr<td::Destructor> watcher, std::string db_name)
: watcher_(std::move(watcher)), db_name_(std::move(db_name)) {
}
void start_up() override {
loop();
}
void tear_down() override {
}
void loop() override {
// Open the database lazily on the first loop iteration.
if (!kv_) {
kv_ = td::KeyValueAsync<td::UInt128, td::BufferSlice>(
std::make_unique<td::RocksDb>(td::RocksDb::open(db_name_).move_as_ok()));
set_start_at_ = td::Timestamp::now();
}
if (next_set_ && next_set_.is_in_past()) {
// Issue writes in batches of 10 per alarm tick.
for (size_t i = 0; i < 10 && left_cnt_ > 0; i++, left_cnt_--) {
do_set();
}
if (left_cnt_ > 0) {
next_set_ = td::Timestamp::in(0.01);
alarm_timestamp() = next_set_;
} else {
next_set_ = td::Timestamp::never();
set_finish_at_ = td::Timestamp::now();
}
}
}
private:
std::shared_ptr<td::Destructor> watcher_;
td::optional<td::KeyValueAsync<td::UInt128, td::BufferSlice>> kv_;
std::string db_name_;
int left_cnt_ = 1000000000;
int pending_cnt_ = left_cnt_;
td::Timestamp next_set_ = td::Timestamp::now();
td::Timestamp set_start_at_;
td::Timestamp set_finish_at_;
void do_set() {
// Only the first byte of the key is randomized, so there are at most 256
// distinct keys and most writes overwrite existing entries.
td::UInt128 key = td::UInt128::zero();
td::Random::secure_bytes(as_slice(key).substr(0, 1));
td::BufferSlice data(1024);
td::Random::secure_bytes(as_slice(data));
kv_.value().set(key, std::move(data), [actor_id = actor_id(this)](td::Result<td::Unit> res) {
res.ensure();
send_closure(actor_id, &Worker::on_stored);
});
}
void on_stored() {
pending_cnt_--;
if (pending_cnt_ == 0) {
// Log drain time (after last submit) and total submission time.
auto now = td::Timestamp::now();
LOG(ERROR) << (now.at() - set_finish_at_.at());
LOG(ERROR) << (set_finish_at_.at() - set_start_at_.at());
stop();
}
}
};
scheduler.run_in_context([watcher = std::move(watcher), &db_name]() mutable {
td::actor::create_actor<Worker>("Worker", watcher, db_name.str()).release();
watcher.reset();
});
scheduler.run();
}