Mirror of https://github.com/ton-blockchain/ton, synced 2025-02-12 11:12:16 +00:00

bugfixes + decreased archive slice size

This commit is contained in: parent 148a5e0179, commit 8be3fc99ed
11 changed files with 290 additions and 102 deletions
@@ -22,7 +22,7 @@
#include <openssl/opensslv.h>
#if OPENSSL_VERSION_NUMBER >= 0x10101000L && OPENSSL_VERSION_NUMBER != 0x20000000L
#if OPENSSL_VERSION_NUMBER >= 0x10101000L && OPENSSL_VERSION_NUMBER != 0x20000000L || defined(OPENSSL_IS_BORINGSSL)
#include "td/utils/base64.h"
#include "td/utils/BigNum.h"

@@ -57,7 +57,7 @@ SecureString Ed25519::PrivateKey::as_octet_string() const {
return octet_string_.copy();
}
#if OPENSSL_VERSION_NUMBER >= 0x10101000L && OPENSSL_VERSION_NUMBER != 0x20000000L
#if OPENSSL_VERSION_NUMBER >= 0x10101000L && OPENSSL_VERSION_NUMBER != 0x20000000L || defined(OPENSSL_IS_BORINGSSL)
namespace detail {
@@ -31,10 +31,12 @@
#include "block-db.h"
#include "block-auto.h"
#include "block-parse.h"
#include "mc-config.h"
#include "vm/cp0.h"
#include <getopt.h>
using td::Ref;
using namespace std::literals::string_literals;
int verbosity;

@@ -46,6 +48,12 @@ struct IntError {
}
};
void throw_err(td::Status err) {
if (err.is_error()) {
throw IntError{err.to_string()};
}
}
td::Ref<vm::Cell> load_boc(std::string filename) {
std::cerr << "loading bag-of-cell file " << filename << std::endl;
auto bytes_res = block::load_binary_file(filename);

@@ -63,6 +71,8 @@ td::Ref<vm::Cell> load_boc(std::string filename) {
return boc.get_root_cell();
}
std::vector<Ref<vm::Cell>> loaded_boc;
void test1() {
block::ShardId id{ton::masterchainId}, id2{ton::basechainId, 0x11efULL << 48};
std::cout << '[' << id << "][" << id2 << ']' << std::endl;

@@ -190,6 +200,49 @@ void test2(vm::CellSlice& cs) {
<< block::gen::HashmapE{32, block::gen::t_uint16}.validate_upto(1024, cs) << std::endl;
}
td::Status test_vset() {
if (loaded_boc.size() != 2) {
return td::Status::Error(
"must have exactly two boc files (with a masterchain Block and with ConfigParams) for vset compute test");
}
std::cerr << "running test_vset()\n";
TRY_RESULT(config, block::Config::unpack_config(vm::load_cell_slice_ref(loaded_boc[1])));
std::cerr << "config unpacked\n";
auto cv_root = config->get_config_param(34);
if (cv_root.is_null()) {
return td::Status::Error("no config parameter 34");
}
std::cerr << "config param #34 obtained\n";
TRY_RESULT(cur_validators, block::Config::unpack_validator_set(std::move(cv_root)));
// auto vconf = config->get_catchain_validators_config();
std::cerr << "validator set unpacked\n";
std::cerr << "unpacking ShardHashes\n";
block::ShardConfig shards;
if (!shards.unpack(vm::load_cell_slice_ref(loaded_boc[0]))) {
return td::Status::Error("cannot unpack ShardConfig");
}
std::cerr << "ShardHashes initialized\n";
ton::ShardIdFull shard{0, 0x6e80000000000000};
ton::CatchainSeqno cc_seqno = std::max(48763, 48763) + 1 + 1;
ton::UnixTime now = 1586169666;
cc_seqno = shards.get_shard_cc_seqno(shard);
std::cerr << "shard=" << shard.to_str() << " cc_seqno=" << cc_seqno << " time=" << now << std::endl;
if (cc_seqno == ~0U) {
return td::Status::Error("cannot compute cc_seqno for shard "s + shard.to_str());
}
auto nodes = config->compute_validator_set(shard, *cur_validators, now, cc_seqno);
if (nodes.empty()) {
return td::Status::Error(PSTRING() << "compute_validator_set() for " << shard.to_str() << "," << now << ","
<< cc_seqno << " returned empty list");
}
for (auto& x : nodes) {
std::cout << "weight=" << x.weight << " key=" << x.key.as_bits256().to_hex() << " addr=" << x.addr.to_hex()
<< std::endl;
}
// ...
return td::Status::OK();
}
void usage() {
std::cout << "usage: dump-block [-t<typename>][-S][<boc-file>]\n\tor dump-block -h\n\tDumps specified blockchain "
"block or state "

@@ -202,8 +255,11 @@ int main(int argc, char* const argv[]) {
int new_verbosity_level = VERBOSITY_NAME(INFO);
const char* tname = nullptr;
const tlb::TLB* type = &block::gen::t_Block;
bool vset_compute_test = false;
bool store_loaded = false;
int dump = 3;
auto zerostate = std::make_unique<block::ZerostateInfo>();
while ((i = getopt(argc, argv, "CSt:hv:")) != -1) {
while ((i = getopt(argc, argv, "CSt:hqv:")) != -1) {
switch (i) {
case 'C':
type = &block::gen::t_VmCont;

@@ -218,6 +274,12 @@ int main(int argc, char* const argv[]) {
case 'v':
new_verbosity_level = VERBOSITY_NAME(FATAL) + (verbosity = td::to_integer<int>(td::Slice(optarg)));
break;
case 'q':
type = &block::gen::t_ShardHashes;
vset_compute_test = true;
store_loaded = true;
dump = 0;
break;
case 'h':
usage();
std::exit(2);

@@ -228,17 +290,22 @@ int main(int argc, char* const argv[]) {
}
SET_VERBOSITY_LEVEL(new_verbosity_level);
try {
bool done = false;
int loaded = 0;
while (optind < argc) {
auto boc = load_boc(argv[optind++]);
if (boc.is_null()) {
std::cerr << "(invalid boc)" << std::endl;
std::cerr << "(invalid boc in file" << argv[optind - 1] << ")" << std::endl;
std::exit(2);
} else {
done = true;
vm::CellSlice cs{vm::NoVm(), boc};
cs.print_rec(std::cout);
std::cout << std::endl;
if (store_loaded) {
loaded_boc.push_back(boc);
}
++loaded;
if (dump & 1) {
vm::CellSlice cs{vm::NoVm(), boc};
cs.print_rec(std::cout);
std::cout << std::endl;
}
if (!type) {
tlb::TypenameLookup dict(block::gen::register_simple_types);
type = dict.lookup(tname);

@@ -247,20 +314,31 @@ int main(int argc, char* const argv[]) {
std::exit(3);
}
}
type->print_ref(std::cout, boc);
std::cout << std::endl;
if (dump & 2) {
type->print_ref(std::cout, boc);
std::cout << std::endl;
}
bool ok = type->validate_ref(1048576, boc);
std::cout << "(" << (ok ? "" : "in") << "valid " << *type << ")" << std::endl;
if (vset_compute_test) {
if (!ok || loaded > 2) {
std::cerr << "fatal: validity check failed\n";
exit(3);
}
type = &block::gen::t_ConfigParams;
}
}
}
if (!done) {
if (vset_compute_test) {
throw_err(test_vset());
} else if (!loaded) {
test1();
}
} catch (IntError& err) {
std::cerr << "caught internal error " << err.err_msg << std::endl;
std::cerr << "internal error: " << err.err_msg << std::endl;
return 1;
} catch (vm::VmError& err) {
std::cerr << "caught vm error " << err.get_msg() << std::endl;
std::cerr << "vm error: " << err.get_msg() << std::endl;
return 1;
}
return 0;
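A usage note inferred from the dump-block changes above (the new 'q' flag is not mentioned in the usage() string): with -q the tool runs the validator-set compute test instead of a plain dump. It expects exactly two bag-of-cells files on the command line, a ShardHashes dump first and a ConfigParams dump second; both are validated against their TL-B types and then test_vset() is executed, roughly:

dump-block -q shardhashes.boc configparams.boc

where the two file names are placeholders.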
@@ -980,8 +980,11 @@ bool TestNode::do_parse_line() {
(parse_block_id_ext(domain, blkid) ? get_word_to(domain) : (blkid = mc_last_id_).is_valid()) &&
(seekeoln() || parse_int16(cat)) && seekeoln() &&
dns_resolve_start(workchain, addr, blkid, domain, cat, step);
} else if (word == "allshards") {
return eoln() ? get_all_shards() : (parse_block_id_ext(blkid) && seekeoln() && get_all_shards(false, blkid));
} else if (word == "allshards" || word == "allshardssave") {
std::string filename;
return (word.size() <= 9 || get_word_to(filename)) &&
(seekeoln() ? get_all_shards(filename)
: (parse_block_id_ext(blkid) && seekeoln() && get_all_shards(filename, false, blkid)));
} else if (word == "saveconfig") {
blkid = mc_last_id_;
std::string filename;

@@ -2105,7 +2108,7 @@ void TestNode::got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned
out << (incomplete ? "(block transaction list incomplete)" : "(end of block transaction list)") << std::endl;
}
bool TestNode::get_all_shards(bool use_last, ton::BlockIdExt blkid) {
bool TestNode::get_all_shards(std::string filename, bool use_last, ton::BlockIdExt blkid) {
if (use_last) {
blkid = mc_last_id_;
}

@@ -2122,7 +2125,7 @@ bool TestNode::get_all_shards(bool use_last, ton::BlockIdExt blkid) {
auto b = ton::serialize_tl_object(
ton::create_tl_object<ton::lite_api::liteServer_getAllShardsInfo>(ton::create_tl_lite_block_id(blkid)), true);
LOG(INFO) << "requesting recent shard configuration";
return envelope_send_query(std::move(b), [Self = actor_id(this)](td::Result<td::BufferSlice> R)->void {
return envelope_send_query(std::move(b), [ Self = actor_id(this), filename ](td::Result<td::BufferSlice> R)->void {
if (R.is_error()) {
return;
}

@@ -2132,12 +2135,12 @@ bool TestNode::get_all_shards(bool use_last, ton::BlockIdExt blkid) {
} else {
auto f = F.move_as_ok();
td::actor::send_closure_later(Self, &TestNode::got_all_shards, ton::create_block_id(f->id_), std::move(f->proof_),
std::move(f->data_));
std::move(f->data_), filename);
}
});
}
void TestNode::got_all_shards(ton::BlockIdExt blk, td::BufferSlice proof, td::BufferSlice data) {
void TestNode::got_all_shards(ton::BlockIdExt blk, td::BufferSlice proof, td::BufferSlice data, std::string filename) {
LOG(INFO) << "got shard configuration with respect to block " << blk.to_str();
if (data.empty()) {
td::TerminalIO::out() << "shard configuration is empty" << '\n';

@@ -2171,6 +2174,15 @@ void TestNode::got_all_shards(ton::BlockIdExt blk, td::BufferSlice proof, td::Bu
}
}
}
if (!filename.empty()) {
auto res1 = td::write_file(filename, data.as_slice());
if (res1.is_error()) {
LOG(ERROR) << "cannot write shard configuration to file `" << filename << "` : " << res1.move_as_error();
} else {
out << "saved shard configuration (ShardHashes) to file `" << filename << "` (" << data.size() << " bytes)"
<< std::endl;
}
}
}
show_new_blkids();
}
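A hedged reading of the lite-client hunks above: the existing 'allshards' command keeps its old behaviour (filename stays empty), while the new 'allshardssave <filename> [<block-id-ext>]' form additionally writes the raw ShardHashes bag of cells to <filename> via td::write_file(). Together with the existing 'saveconfig' command, this appears to be the intended way to produce the two input files consumed by dump-block -q.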
@@ -142,8 +142,8 @@ class TestNode : public td::actor::Actor {
std::string domain, std::string qdomain, int cat, int mode, int used_bits,
Ref<vm::Cell> value);
bool show_dns_record(std::ostream& os, int cat, Ref<vm::Cell> value, bool raw_dump);
bool get_all_shards(bool use_last = true, ton::BlockIdExt blkid = {});
void got_all_shards(ton::BlockIdExt blk, td::BufferSlice proof, td::BufferSlice data);
bool get_all_shards(std::string filename = "", bool use_last = true, ton::BlockIdExt blkid = {});
void got_all_shards(ton::BlockIdExt blk, td::BufferSlice proof, td::BufferSlice data, std::string filename);
bool get_config_params(ton::BlockIdExt blkid, td::Promise<td::Unit> do_after, int mode = 0, std::string filename = "",
std::vector<int> params = {});
void got_config_params(ton::BlockIdExt req_blkid, ton::BlockIdExt blkid, td::BufferSlice state_proof,
@@ -98,7 +98,9 @@ BigNum BigNum::from_binary(Slice str) {
}
BigNum BigNum::from_le_binary(Slice str) {
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL)
#if defined(OPENSSL_IS_BORINGSSL)
return BigNum(make_unique<Impl>(BN_le2bn(str.ubegin(), narrow_cast<int>(str.size()), nullptr)));
#elif OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
return BigNum(make_unique<Impl>(BN_lebin2bn(str.ubegin(), narrow_cast<int>(str.size()), nullptr)));
#else
LOG(FATAL) << "Unsupported from_le_binary";

@@ -221,7 +223,7 @@ string BigNum::to_binary(int exact_size) const {
}
string BigNum::to_le_binary(int exact_size) const {
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER) && !defined(OPENSSL_IS_BORINGSSL)
#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER) || defined(OPENSSL_IS_BORINGSSL)
int num_size = get_num_bytes();
if (exact_size == -1) {
exact_size = num_size;

@@ -229,7 +231,11 @@ string BigNum::to_le_binary(int exact_size) const {
CHECK(exact_size >= num_size);
}
string res(exact_size, '\0');
#if defined(OPENSSL_IS_BORINGSSL)
BN_bn2le_padded(MutableSlice(res).ubegin(), exact_size, impl_->big_num);
#else
BN_bn2lebinpad(impl_->big_num, MutableSlice(res).ubegin(), exact_size);
#endif
return res;
#else
LOG(FATAL) << "Unsupported to_le_binary";
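The BigNum hunks apply the same idea as the Ed25519 guard at the top of the diff: BoringSSL defines OPENSSL_IS_BORINGSSL and spells the little-endian BIGNUM conversions differently from OpenSSL 1.1+. A minimal self-contained sketch of that dispatch, assuming OpenSSL >= 1.1.0 or BoringSSL headers are available (the helper name bignum_to_le and the use of std::vector are illustrative, not from the diff):

#include <openssl/bn.h>
#include <vector>

// Serialize a BIGNUM as little-endian bytes, zero-padded to exact_size.
std::vector<unsigned char> bignum_to_le(const BIGNUM* bn, int exact_size) {
  std::vector<unsigned char> out(exact_size, 0);
#if defined(OPENSSL_IS_BORINGSSL)
  BN_bn2le_padded(out.data(), out.size(), bn);   // BoringSSL spelling
#else
  BN_bn2lebinpad(bn, out.data(), exact_size);    // OpenSSL >= 1.1.0 spelling
#endif
  return out;
}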
@@ -540,7 +540,7 @@ void ArchiveManager::load_package(PackageId id) {
}
}
desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, prefix);
desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, db_root_);
get_file_map(id).emplace(id, std::move(desc));
}

@@ -574,7 +574,7 @@ ArchiveManager::FileDescription *ArchiveManager::add_file_desc(ShardIdFull shard
FileDescription desc{id, false};
td::mkdir(db_root_ + id.path()).ensure();
std::string prefix = PSTRING() << db_root_ << id.path() << id.name();
desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, prefix);
desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, db_root_);
if (!id.temp) {
update_desc(desc, shard, seqno, ts, lt);
}
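In the archive-manager hunks above, ArchiveSlice no longer receives a precomputed file prefix; it is handed db_root_ instead and, as the archive-slice changes below show, derives its own .pack and .index paths from a PackageId (db_root_ + path() + name()), which is what lets a single slice own several package files.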
@@ -24,32 +24,6 @@ namespace ton {
namespace validator {
struct PackageId {
td::uint32 id;
bool key;
bool temp;
explicit PackageId(td::uint32 id, bool key, bool temp) : id(id), key(key), temp(temp) {
}
bool operator<(const PackageId &with) const {
return id < with.id;
}
bool operator==(const PackageId &with) const {
return id == with.id;
}
std::string path() const;
std::string name() const;
bool is_empty() {
return id == std::numeric_limits<td::uint32>::max();
}
static PackageId empty(bool key, bool temp) {
return PackageId(std::numeric_limits<td::uint32>::max(), key, temp);
}
};
class RootDb;
class ArchiveManager : public td::actor::Actor {
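Note that the PackageId struct removed from this header is not dropped: an identical definition is added to the archive-slice header further down (hunk @@ -26,6 +26,32 @@), so the type simply moves to where ArchiveSlice can use it for per-slice file naming.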
@@ -168,8 +168,10 @@ void ArchiveSlice::add_file(BlockHandle handle, FileReference ref_id, td::Buffer
promise.set_error(td::Status::Error(ErrorCode::notready, "package already gc'd"));
return;
}
auto &p = choose_package(
handle ? handle->id().is_masterchain() ? handle->id().seqno() : handle->masterchain_ref_block() : 0);
TRY_RESULT_PROMISE(
promise, p,
choose_package(
handle ? handle->id().is_masterchain() ? handle->id().seqno() : handle->masterchain_ref_block() : 0, true));
std::string value;
auto R = kv_->get(ref_id.hash().to_hex(), value);
R.ensure();

@@ -177,29 +179,35 @@
promise.set_value(td::Unit());
return;
}
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), ref_id, promise = std::move(promise)](
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), idx = p->idx, ref_id, promise = std::move(promise)](
td::Result<std::pair<td::uint64, td::uint64>> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
return;
}
auto v = R.move_as_ok();
td::actor::send_closure(SelfId, &ArchiveSlice::add_file_cont, std::move(ref_id), v.first, v.second,
td::actor::send_closure(SelfId, &ArchiveSlice::add_file_cont, idx, std::move(ref_id), v.first, v.second,
std::move(promise));
});
td::actor::send_closure(p.writer, &PackageWriter::append, ref_id.filename(), std::move(data), std::move(P));
td::actor::send_closure(p->writer, &PackageWriter::append, ref_id.filename(), std::move(data), std::move(P));
}
void ArchiveSlice::add_file_cont(FileReference ref_id, td::uint64 offset, td::uint64 size,
void ArchiveSlice::add_file_cont(size_t idx, FileReference ref_id, td::uint64 offset, td::uint64 size,
td::Promise<td::Unit> promise) {
if (destroyed_) {
promise.set_error(td::Status::Error(ErrorCode::notready, "package already gc'd"));
return;
}
begin_transaction();
kv_->set("status", td::to_string(size)).ensure();
kv_->set(ref_id.hash().to_hex(), td::to_string(offset)).ensure();
if (sliced_mode_) {
kv_->set(PSTRING() << "status." << idx, td::to_string(size)).ensure();
kv_->set(ref_id.hash().to_hex(), td::to_string(offset)).ensure();
} else {
CHECK(!idx);
kv_->set("status", td::to_string(size)).ensure();
kv_->set(ref_id.hash().to_hex(), td::to_string(offset)).ensure();
}
commit_transaction();
promise.set_value(td::Unit());
}

@@ -253,8 +261,6 @@ void ArchiveSlice::get_file(ConstBlockHandle handle, FileReference ref_id, td::P
promise.set_error(td::Status::Error(ErrorCode::notready, "package already gc'd"));
return;
}
auto &p = choose_package(
handle ? handle->id().is_masterchain() ? handle->id().seqno() : handle->masterchain_ref_block() : 0);
std::string value;
auto R = kv_->get(ref_id.hash().to_hex(), value);
R.ensure();

@@ -263,6 +269,10 @@ void ArchiveSlice::get_file(ConstBlockHandle handle, FileReference ref_id, td::P
return;
}
auto offset = td::to_integer<td::uint64>(value);
TRY_RESULT_PROMISE(
promise, p,
choose_package(
handle ? handle->id().is_masterchain() ? handle->id().seqno() : handle->masterchain_ref_block() : 0, false));
auto P = td::PromiseCreator::lambda(
[promise = std::move(promise)](td::Result<std::pair<std::string, td::BufferSlice>> R) mutable {
if (R.is_error()) {

@@ -271,7 +281,7 @@ void ArchiveSlice::get_file(ConstBlockHandle handle, FileReference ref_id, td::P
promise.set_value(std::move(R.move_as_ok().second));
}
});
td::actor::create_actor<PackageReader>("reader", p.package, offset, std::move(P)).release();
td::actor::create_actor<PackageReader>("reader", p->package, offset, std::move(P)).release();
}
void ArchiveSlice::get_block_common(AccountIdPrefixFull account_id,

@@ -416,36 +426,70 @@ td::BufferSlice ArchiveSlice::get_db_key_block_info(BlockIdExt block_id) {
void ArchiveSlice::get_slice(td::uint64 archive_id, td::uint64 offset, td::uint32 limit,
td::Promise<td::BufferSlice> promise) {
if (archive_id != archive_id_) {
if (static_cast<td::uint32>(archive_id) != archive_id_) {
promise.set_error(td::Status::Error(ErrorCode::error, "bad archive id"));
return;
}
td::actor::create_actor<db::ReadFile>("readfile", prefix_ + ".pack", offset, limit, 0, std::move(promise)).release();
auto value = static_cast<td::uint32>(archive_id >> 32);
TRY_RESULT_PROMISE(promise, p, choose_package(value, false));
td::actor::create_actor<db::ReadFile>("readfile", p->path, offset, limit, 0, std::move(promise)).release();
}
void ArchiveSlice::get_archive_id(BlockSeqno masterchain_seqno, td::Promise<td::uint64> promise) {
if (!sliced_mode_) {
promise.set_result(archive_id_);
} else {
TRY_RESULT_PROMISE(promise, p, choose_package(masterchain_seqno, false));
promise.set_result(p->id * (1ull << 32) + archive_id_);
}
}
void ArchiveSlice::start_up() {
auto R = Package::open(prefix_ + ".pack", false, true);
if (R.is_error()) {
LOG(FATAL) << "failed to open/create archive '" << prefix_ << ".pack"
<< "': " << R.move_as_error();
return;
}
auto pack = std::make_shared<Package>(R.move_as_ok());
kv_ = std::make_shared<td::RocksDb>(td::RocksDb::open(prefix_ + ".index").move_as_ok());
PackageId p_id{archive_id_, key_blocks_only_, temp_};
std::string db_path = PSTRING() << db_root_ << p_id.path() << p_id.name() << ".index";
kv_ = std::make_shared<td::RocksDb>(td::RocksDb::open(db_path).move_as_ok());
std::string value;
auto R2 = kv_->get("status", value);
R2.ensure();
if (R2.move_as_ok() == td::KeyValue::GetStatus::Ok) {
auto len = td::to_integer<td::uint64>(value);
pack->truncate(len);
if (value == "sliced") {
sliced_mode_ = true;
R2 = kv_->get("slices", value);
R2.ensure();
auto tot = td::to_integer<td::uint32>(value);
R2 = kv_->get("slice_size", value);
R2.ensure();
slice_size_ = td::to_integer<td::uint32>(value);
CHECK(slice_size_ > 0);
for (td::uint32 i = 0; i < tot; i++) {
R2 = kv_->get(PSTRING() << "status." << i, value);
R2.ensure();
auto len = td::to_integer<td::uint64>(value);
auto v = archive_id_ + slice_size_ * i;
add_package(v, len);
}
} else {
auto len = td::to_integer<td::uint64>(value);
add_package(archive_id_, len);
}
} else {
pack->truncate(0);
if (!temp_ && !key_blocks_only_) {
sliced_mode_ = true;
kv_->begin_transaction().ensure();
kv_->set("status", "sliced").ensure();
kv_->set("slices", "1").ensure();
kv_->set("slice_size", td::to_string(slice_size_)).ensure();
kv_->set("status.0", "0").ensure();
kv_->commit_transaction().ensure();
} else {
kv_->begin_transaction().ensure();
kv_->set("status", "0").ensure();
kv_->commit_transaction().ensure();
}
add_package(archive_id_, 0);
}
auto writer = td::actor::create_actor<PackageWriter>("writer", pack);
packages_.emplace_back(std::move(pack), std::move(writer), prefix_ + ".pack", 0);
}
void ArchiveSlice::begin_transaction() {
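The archive id returned by get_archive_id() above is a composite 64-bit value in sliced mode, and get_slice() takes it apart again. A small sketch of how that packing appears to work (helper names are mine; treat this as an assumption, not the project's API):

#include <cstdint>
#include <utility>

// High 32 bits: first masterchain seqno covered by the chosen package (p->id).
// Low 32 bits: the slice's base archive id (archive_id_).
inline std::uint64_t pack_archive_id(std::uint32_t package_seqno, std::uint32_t base_archive_id) {
  return (static_cast<std::uint64_t>(package_seqno) << 32) + base_archive_id;
}

// Returns {package_seqno, base_archive_id}; get_slice() checks the low half
// against archive_id_ and uses the high half to pick the package.
inline std::pair<std::uint32_t, std::uint32_t> unpack_archive_id(std::uint64_t id) {
  return {static_cast<std::uint32_t>(id >> 32), static_cast<std::uint32_t>(id)};
}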
@@ -484,15 +528,46 @@ void ArchiveSlice::set_async_mode(bool mode, td::Promise<td::Unit> promise) {
}
}
ArchiveSlice::ArchiveSlice(td::uint32 archive_id, bool key_blocks_only, bool temp, std::string prefix)
: archive_id_(archive_id), key_blocks_only_(key_blocks_only), temp_(temp), prefix_(std::move(prefix)) {
ArchiveSlice::ArchiveSlice(td::uint32 archive_id, bool key_blocks_only, bool temp, std::string db_root)
: archive_id_(archive_id), key_blocks_only_(key_blocks_only), temp_(temp), db_root_(std::move(db_root)) {
}
ArchiveSlice::PackageInfo &ArchiveSlice::choose_package(BlockSeqno masterchain_seqno) {
if (temp_ || key_blocks_only_) {
return packages_[0];
td::Result<ArchiveSlice::PackageInfo *> ArchiveSlice::choose_package(BlockSeqno masterchain_seqno, bool force) {
if (temp_ || key_blocks_only_ || !sliced_mode_) {
return &packages_[0];
}
return packages_[0];
if (masterchain_seqno < archive_id_) {
return td::Status::Error(ErrorCode::notready, "too small masterchain seqno");
}
auto v = (masterchain_seqno - archive_id_) / slice_size_;
if (v >= packages_.size()) {
if (!force) {
return td::Status::Error(ErrorCode::notready, "too big masterchain seqno");
}
CHECK(v == packages_.size());
begin_transaction();
kv_->set("slices", td::to_string(v + 1)).ensure();
kv_->set(PSTRING() << "status." << v, "0").ensure();
commit_transaction();
CHECK((masterchain_seqno - archive_id_) % slice_size_ == 0);
add_package(masterchain_seqno, 0);
return &packages_[v];
} else {
return &packages_[v];
}
}
void ArchiveSlice::add_package(td::uint32 seqno, td::uint64 size) {
PackageId p_id{seqno, key_blocks_only_, temp_};
std::string path = PSTRING() << db_root_ << p_id.path() << p_id.name() << ".pack";
auto R = Package::open(path, false, true);
if (R.is_error()) {
LOG(FATAL) << "failed to open/create archive '" << path << "': " << R.move_as_error();
return;
}
auto pack = std::make_shared<Package>(R.move_as_ok());
auto writer = td::actor::create_actor<PackageWriter>("writer", pack);
packages_.emplace_back(std::move(pack), std::move(writer), seqno, path, 0);
}
namespace {

@@ -520,12 +595,16 @@ void ArchiveSlice::destroy(td::Promise<td::Unit> promise) {
ig.add_promise(std::move(promise));
destroyed_ = true;
for (auto &p : packages_) {
td::unlink(p.path).ensure();
}
packages_.clear();
kv_ = nullptr;
td::unlink(prefix_ + ".pack").ensure();
delay_action([name = prefix_ + ".index", attempt = 0,
PackageId p_id{archive_id_, key_blocks_only_, temp_};
std::string db_path = PSTRING() << db_root_ << p_id.path() << p_id.name() << ".index";
delay_action([name = db_path, attempt = 0,
promise = ig.get_promise()]() mutable { destroy_db(name, attempt, std::move(promise)); },
td::Timestamp::in(0.0));
}
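choose_package() above is the heart of the sliced layout: each package ("slice") covers slice_size_ consecutive masterchain seqnos starting at archive_id_, and the header change below sets slice_size_ to 100 by default. A compact sketch of the index computation under those assumptions (names simplified, bounds checks reduced to a comment):

#include <cstddef>
#include <cstdint>

// Maps a masterchain seqno to the index of the package that stores it.
// Preconditions (enforced by choose_package()): masterchain_seqno >= base_seqno,
// and the resulting index is either an existing package or, with force == true,
// exactly packages_.size() so a new slice can be appended.
inline std::size_t slice_index(std::uint32_t masterchain_seqno,
                               std::uint32_t base_seqno,   // archive_id_
                               std::uint32_t slice_size) { // slice_size_, 100 by default
  return (masterchain_seqno - base_seqno) / slice_size;
}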
@@ -26,6 +26,32 @@ namespace ton {
namespace validator {
struct PackageId {
td::uint32 id;
bool key;
bool temp;
explicit PackageId(td::uint32 id, bool key, bool temp) : id(id), key(key), temp(temp) {
}
bool operator<(const PackageId &with) const {
return id < with.id;
}
bool operator==(const PackageId &with) const {
return id == with.id;
}
std::string path() const;
std::string name() const;
bool is_empty() {
return id == std::numeric_limits<td::uint32>::max();
}
static PackageId empty(bool key, bool temp) {
return PackageId(std::numeric_limits<td::uint32>::max(), key, temp);
}
};
class PackageWriter : public td::actor::Actor {
public:
PackageWriter(std::shared_ptr<Package> package) : package_(std::move(package)) {

@@ -47,11 +73,9 @@ class PackageWriter : public td::actor::Actor {
class ArchiveSlice : public td::actor::Actor {
public:
ArchiveSlice(td::uint32 archive_id, bool key_blocks_only, bool temp, std::string prefix);
ArchiveSlice(td::uint32 archive_id, bool key_blocks_only, bool temp, std::string db_root);
void get_archive_id(BlockSeqno masterchain_seqno, td::Promise<td::uint64> promise) {
promise.set_result(archive_id_);
}
void get_archive_id(BlockSeqno masterchain_seqno, td::Promise<td::uint64> promise);
void add_handle(BlockHandle handle, td::Promise<td::Unit> promise);
void update_handle(BlockHandle handle, td::Promise<td::Unit> promise);

@@ -80,7 +104,8 @@ class ArchiveSlice : public td::actor::Actor {
private:
void written_data(BlockHandle handle, td::Promise<td::Unit> promise);
void add_file_cont(FileReference ref_id, td::uint64 offset, td::uint64 size, td::Promise<td::Unit> promise);
void add_file_cont(size_t idx, FileReference ref_id, td::uint64 offset, td::uint64 size,
td::Promise<td::Unit> promise);
/* ltdb */
td::BufferSlice get_db_key_lt_desc(ShardIdFull shard);

@@ -96,24 +121,28 @@ class ArchiveSlice : public td::actor::Actor {
bool destroyed_ = false;
bool async_mode_ = false;
bool huge_transaction_started_ = false;
bool sliced_mode_{false};
td::uint32 huge_transaction_size_ = 0;
td::uint32 slice_size_{100};
std::string prefix_;
std::string db_root_;
std::shared_ptr<td::KeyValue> kv_;
struct PackageInfo {
PackageInfo(std::shared_ptr<Package> package, td::actor::ActorOwn<PackageWriter> writer, std::string path,
td::uint32 idx)
: package(std::move(package)), writer(std ::move(writer)), path(std::move(path)), idx(idx) {
PackageInfo(std::shared_ptr<Package> package, td::actor::ActorOwn<PackageWriter> writer, BlockSeqno id,
std::string path, td::uint32 idx)
: package(std::move(package)), writer(std ::move(writer)), id(id), path(std::move(path)), idx(idx) {
}
std::shared_ptr<Package> package;
td::actor::ActorOwn<PackageWriter> writer;
BlockSeqno id;
std::string path;
td::uint32 idx;
};
std::vector<PackageInfo> packages_;
PackageInfo &choose_package(BlockSeqno masterchain_seqno);
td::Result<PackageInfo *> choose_package(BlockSeqno masterchain_seqno, bool force);
void add_package(BlockSeqno masterchain_seqno, td::uint64 size);
};
} // namespace validator
@@ -2965,6 +2965,9 @@ static int update_one_shard(block::McShardHash& info, const block::McShardHash*
if (!info.is_fsm_none() && (now >= info.fsm_utime_end() || info.before_split_)) {
info.clear_fsm();
changed = true;
} else if (info.is_fsm_merge() && (!sibling || sibling->before_split_)) {
info.clear_fsm();
changed = true;
}
if (wc_info && !info.before_split_) {
// workchain present in configuration?

@@ -2977,14 +2980,15 @@ static int update_one_shard(block::McShardHash& info, const block::McShardHash*
LOG(INFO) << "preparing to split shard " << info.shard().to_str() << " during " << info.fsm_utime() << " .. "
<< info.fsm_utime_end();
} else if (info.is_fsm_none() && depth > wc_info->min_split && (info.want_merge_ || depth > wc_info->max_split) &&
sibling && sibling->is_fsm_none() && (sibling->want_merge_ || depth > wc_info->max_split)) {
sibling && !sibling->before_split_ && sibling->is_fsm_none() &&
(sibling->want_merge_ || depth > wc_info->max_split)) {
// prepare merge
info.set_fsm_merge(now + ton::split_merge_delay, ton::split_merge_interval);
changed = true;
LOG(INFO) << "preparing to merge shard " << info.shard().to_str() << " with " << sibling->shard().to_str()
<< " during " << info.fsm_utime() << " .. " << info.fsm_utime_end();
} else if (info.is_fsm_merge() && depth > wc_info->min_split && sibling && sibling->is_fsm_merge() &&
now >= info.fsm_utime() && now >= sibling->fsm_utime() &&
} else if (info.is_fsm_merge() && depth > wc_info->min_split && sibling && !sibling->before_split_ &&
sibling->is_fsm_merge() && now >= info.fsm_utime() && now >= sibling->fsm_utime() &&
(depth > wc_info->max_split || (info.want_merge_ && sibling->want_merge_))) {
// force merge
info.before_merge_ = true;
@@ -1599,7 +1599,8 @@ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block:
}
unsigned depth = ton::shard_prefix_length(shard);
bool split_cond = ((info.want_split_ || depth < wc_info->min_split) && depth < wc_info->max_split && depth < 60);
bool merge_cond = depth > wc_info->min_split && (info.want_merge_ || depth > wc_info->max_split) && sibling &&
bool merge_cond = !info.before_split_ && depth > wc_info->min_split &&
(info.want_merge_ || depth > wc_info->max_split) && sibling && !sibling->before_split_ &&
(sibling->want_merge_ || depth > wc_info->max_split);
if (!fsm_inherited && !info.is_fsm_none()) {
if (info.fsm_utime() < now_ || info.fsm_utime_end() <= info.fsm_utime() ||

@@ -1618,6 +1619,11 @@ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block:
" in new shard configuration, but merge conditions are not met");
}
}
if (info.is_fsm_merge() && (!sibling || sibling->before_split_)) {
return reject_query(
"future merge for shard "s + shard.to_str() +
" is still set in the new shard configuration, but its sibling is absent or has before_split set");
}
if (info.before_merge_) {
if (!sibling || !sibling->before_merge_) {
return reject_query("before_merge set for shard "s + shard.to_str() +
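Taken together, the collator and validator hunks above implement the same bugfix from two sides: a merge is no longer prepared, forced, or accepted while either shard of the pair still has before_split set, and a pending future merge is cleared (collator) or rejected (validator) when the sibling is absent or about to split, keeping the split/merge state machines of sibling shards consistent.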