mirror of https://github.com/ton-blockchain/ton synced 2025-03-09 15:40:10 +00:00

Merge branch 'testnet' into block-generation

SpyCheese 2023-06-02 13:34:00 +03:00
commit e4e77c16c5
463 changed files with 29976 additions and 2517 deletions


@ -74,7 +74,7 @@ void ArchiveManager::add_handle(BlockHandle handle, td::Promise<td::Unit> promis
}
void ArchiveManager::update_handle(BlockHandle handle, td::Promise<td::Unit> promise) {
FileDescription *f;
const FileDescription *f;
if (handle->handle_moved_to_archive()) {
CHECK(handle->inited_unix_time());
if (!handle->need_flush()) {
@ -439,15 +439,15 @@ void ArchiveManager::check_persistent_state(BlockIdExt block_id, BlockIdExt mast
void ArchiveManager::get_block_by_unix_time(AccountIdPrefixFull account_id, UnixTime ts,
td::Promise<ConstBlockHandle> promise) {
auto f = get_file_desc_by_unix_time(account_id, ts, false);
if (f) {
auto n = f;
do {
n = get_next_file_desc(n);
} while (n != nullptr && !n->has_account_prefix(account_id));
auto f1 = get_file_desc_by_unix_time(account_id, ts, false);
auto f2 = get_next_file_desc(f1, account_id, false);
if (!f1) {
std::swap(f1, f2);
}
if (f1) {
td::actor::ActorId<ArchiveSlice> aid;
if (n) {
aid = n->file_actor_id();
if (f2) {
aid = f2->file_actor_id();
}
auto P = td::PromiseCreator::lambda(
[aid, account_id, ts, promise = std::move(promise)](td::Result<ConstBlockHandle> R) mutable {
@ -457,7 +457,7 @@ void ArchiveManager::get_block_by_unix_time(AccountIdPrefixFull account_id, Unix
td::actor::send_closure(aid, &ArchiveSlice::get_block_by_unix_time, account_id, ts, std::move(promise));
}
});
td::actor::send_closure(f->file_actor_id(), &ArchiveSlice::get_block_by_unix_time, account_id, ts, std::move(P));
td::actor::send_closure(f1->file_actor_id(), &ArchiveSlice::get_block_by_unix_time, account_id, ts, std::move(P));
} else {
promise.set_error(td::Status::Error(ErrorCode::notready, "ts not in db"));
}
@ -465,15 +465,15 @@ void ArchiveManager::get_block_by_unix_time(AccountIdPrefixFull account_id, Unix
void ArchiveManager::get_block_by_lt(AccountIdPrefixFull account_id, LogicalTime lt,
td::Promise<ConstBlockHandle> promise) {
auto f = get_file_desc_by_lt(account_id, lt, false);
if (f) {
auto n = f;
do {
n = get_next_file_desc(n);
} while (n != nullptr && !n->has_account_prefix(account_id));
auto f1 = get_file_desc_by_lt(account_id, lt, false);
auto f2 = get_next_file_desc(f1, account_id, false);
if (!f1) {
std::swap(f1, f2);
}
if (f1) {
td::actor::ActorId<ArchiveSlice> aid;
if (n) {
aid = n->file_actor_id();
if (f2) {
aid = f2->file_actor_id();
}
auto P = td::PromiseCreator::lambda(
[aid, account_id, lt, promise = std::move(promise)](td::Result<ConstBlockHandle> R) mutable {
@ -483,7 +483,7 @@ void ArchiveManager::get_block_by_lt(AccountIdPrefixFull account_id, LogicalTime
td::actor::send_closure(aid, &ArchiveSlice::get_block_by_lt, account_id, lt, std::move(promise));
}
});
td::actor::send_closure(f->file_actor_id(), &ArchiveSlice::get_block_by_lt, account_id, lt, std::move(P));
td::actor::send_closure(f1->file_actor_id(), &ArchiveSlice::get_block_by_lt, account_id, lt, std::move(P));
} else {
promise.set_error(td::Status::Error(ErrorCode::notready, "lt not in db"));
}
@ -558,11 +558,16 @@ void ArchiveManager::deleted_package(PackageId id, td::Promise<td::Unit> promise
auto it = m.find(id);
CHECK(it != m.end());
CHECK(it->second.deleted);
it->second.clear_actor_id();
it->second.file.reset();
promise.set_value(td::Unit());
}
void ArchiveManager::load_package(PackageId id) {
auto &m = get_file_map(id);
if (m.count(id)) {
LOG(WARNING) << "Duplicate id " << id.name();
return;
}
auto key = create_serialize_tl_object<ton_api::db_files_package_key>(id.id, id.key, id.temp);
std::string value;
@ -595,11 +600,11 @@ void ArchiveManager::load_package(PackageId id) {
desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, false, db_root_);
get_file_map(id).emplace(id, std::move(desc));
m.emplace(id, std::move(desc));
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno,
UnixTime ts, LogicalTime lt, bool force) {
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno,
UnixTime ts, LogicalTime lt, bool force) {
auto &f = get_file_map(id);
auto it = f.find(id);
if (it != f.end()) {
@ -607,7 +612,7 @@ ArchiveManager::FileDescription *ArchiveManager::get_file_desc(ShardIdFull shard
return nullptr;
}
if (force && !id.temp) {
update_desc(it->second, shard, seqno, ts, lt);
update_desc(f, it->second, shard, seqno, ts, lt);
}
return &it->second;
}
@ -618,17 +623,18 @@ ArchiveManager::FileDescription *ArchiveManager::get_file_desc(ShardIdFull shard
return add_file_desc(shard, id, seqno, ts, lt);
}
ArchiveManager::FileDescription *ArchiveManager::add_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno,
UnixTime ts, LogicalTime lt) {
const ArchiveManager::FileDescription *ArchiveManager::add_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno,
UnixTime ts, LogicalTime lt) {
auto &f = get_file_map(id);
CHECK(f.count(id) == 0);
FileDescription desc{id, false};
FileDescription new_desc{id, false};
td::mkdir(db_root_ + id.path()).ensure();
std::string prefix = PSTRING() << db_root_ << id.path() << id.name();
desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, false, db_root_);
new_desc.file = td::actor::create_actor<ArchiveSlice>("slice", id.id, id.key, id.temp, false, db_root_);
const FileDescription &desc = f.emplace(id, std::move(new_desc));
if (!id.temp) {
update_desc(desc, shard, seqno, ts, lt);
update_desc(f, desc, shard, seqno, ts, lt);
}
std::vector<tl_object_ptr<ton_api::db_files_package_firstBlock>> vec;
@ -652,7 +658,6 @@ ArchiveManager::FileDescription *ArchiveManager::add_file_desc(ShardIdFull shard
for (auto &e : temp_files_) {
tt.push_back(e.first.id);
}
(id.temp ? tt : (id.key ? tk : t)).push_back(id.id);
index_
->set(create_serialize_tl_object<ton_api::db_files_index_key>().as_slice(),
create_serialize_tl_object<ton_api::db_files_index_value>(std::move(t), std::move(tk), std::move(tt))
@ -668,17 +673,16 @@ ArchiveManager::FileDescription *ArchiveManager::add_file_desc(ShardIdFull shard
.ensure();
}
index_->commit_transaction().ensure();
return &f.emplace(id, std::move(desc)).first->second;
return &desc;
}
void ArchiveManager::update_desc(FileDescription &desc, ShardIdFull shard, BlockSeqno seqno, UnixTime ts,
LogicalTime lt) {
void ArchiveManager::update_desc(FileMap &f, const FileDescription &desc, ShardIdFull shard, BlockSeqno seqno,
UnixTime ts, LogicalTime lt) {
auto it = desc.first_blocks.find(shard);
if (it != desc.first_blocks.end() && it->second.seqno <= seqno) {
return;
}
desc.first_blocks[shard] = FileDescription::Desc{seqno, ts, lt};
f.set_shard_first_block(desc, shard, FileDescription::Desc{seqno, ts, lt});
std::vector<tl_object_ptr<ton_api::db_files_package_firstBlock>> vec;
for (auto &e : desc.first_blocks) {
vec.push_back(create_tl_object<ton_api::db_files_package_firstBlock>(e.first.workchain, e.first.shard,
@ -694,150 +698,91 @@ void ArchiveManager::update_desc(FileDescription &desc, ShardIdFull shard, Block
index_->commit_transaction().ensure();
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_seqno(ShardIdFull shard, BlockSeqno seqno,
bool key_block) {
auto &f = get_file_map(PackageId{0, key_block, false});
for (auto it = f.rbegin(); it != f.rend(); it++) {
auto i = it->second.first_blocks.find(shard);
if (i != it->second.first_blocks.end() && i->second.seqno <= seqno) {
if (it->second.deleted) {
return nullptr;
} else {
return &it->second;
}
}
}
return nullptr;
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_seqno(ShardIdFull shard, BlockSeqno seqno,
bool key_block) {
return get_file_map(PackageId{0, key_block, false}).get_file_desc_by_seqno(shard, seqno);
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_unix_time(ShardIdFull shard, UnixTime ts,
bool key_block) {
auto &f = get_file_map(PackageId{0, key_block, false});
for (auto it = f.rbegin(); it != f.rend(); it++) {
auto i = it->second.first_blocks.find(shard);
if (i != it->second.first_blocks.end() && i->second.ts <= ts) {
if (it->second.deleted) {
return nullptr;
} else {
return &it->second;
}
}
}
return nullptr;
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_unix_time(ShardIdFull shard, UnixTime ts,
bool key_block) {
return get_file_map(PackageId{0, key_block, false}).get_file_desc_by_unix_time(shard, ts);
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_lt(ShardIdFull shard, LogicalTime lt,
bool key_block) {
auto &f = get_file_map(PackageId{0, key_block, false});
for (auto it = f.rbegin(); it != f.rend(); it++) {
auto i = it->second.first_blocks.find(shard);
if (i != it->second.first_blocks.end() && i->second.lt <= lt) {
if (it->second.deleted) {
return nullptr;
} else {
return &it->second;
}
}
}
return nullptr;
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_lt(ShardIdFull shard, LogicalTime lt,
bool key_block) {
return get_file_map(PackageId{0, key_block, false}).get_file_desc_by_lt(shard, lt);
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_seqno(AccountIdPrefixFull account, BlockSeqno seqno,
bool key_block) {
auto &f = get_file_map(PackageId{0, key_block, false});
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_seqno(AccountIdPrefixFull account,
BlockSeqno seqno, bool key_block) {
if (account.is_masterchain()) {
return get_file_desc_by_seqno(ShardIdFull{masterchainId}, seqno, key_block);
}
for (auto it = f.rbegin(); it != f.rend(); it++) {
if (it->second.deleted) {
continue;
}
bool found = false;
for (int i = 0; i < 60; i++) {
auto shard = shard_prefix(account, i);
auto it2 = it->second.first_blocks.find(shard);
if (it2 != it->second.first_blocks.end()) {
if (it2->second.seqno <= seqno) {
return &it->second;
}
found = true;
} else if (found) {
break;
}
auto &f = get_file_map(PackageId{0, key_block, false});
const FileDescription *result = nullptr;
for (int i = 0; i <= 60; i++) {
const FileDescription *desc = f.get_file_desc_by_seqno(shard_prefix(account, i), seqno);
if (desc && (!result || result->id < desc->id)) {
result = desc;
} else if (result && (!desc || desc->id < result->id)) {
break;
}
}
return nullptr;
return result;
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_unix_time(AccountIdPrefixFull account, UnixTime ts,
bool key_block) {
auto &f = get_file_map(PackageId{0, key_block, false});
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_unix_time(AccountIdPrefixFull account,
UnixTime ts, bool key_block) {
if (account.is_masterchain()) {
return get_file_desc_by_unix_time(ShardIdFull{masterchainId}, ts, key_block);
}
for (auto it = f.rbegin(); it != f.rend(); it++) {
if (it->second.deleted) {
continue;
}
bool found = false;
for (int i = 0; i < 60; i++) {
auto shard = shard_prefix(account, i);
auto it2 = it->second.first_blocks.find(shard);
if (it2 != it->second.first_blocks.end()) {
if (it2->second.ts <= ts) {
return &it->second;
}
found = true;
} else if (found) {
break;
}
auto &f = get_file_map(PackageId{0, key_block, false});
const FileDescription *result = nullptr;
for (int i = 0; i <= 60; i++) {
const FileDescription *desc = f.get_file_desc_by_unix_time(shard_prefix(account, i), ts);
if (desc && (!result || result->id < desc->id)) {
result = desc;
} else if (result && (!desc || desc->id < result->id)) {
break;
}
}
return nullptr;
return result;
}
ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_lt(AccountIdPrefixFull account, LogicalTime lt,
bool key_block) {
auto &f = get_file_map(PackageId{0, key_block, false});
const ArchiveManager::FileDescription *ArchiveManager::get_file_desc_by_lt(AccountIdPrefixFull account, LogicalTime lt,
bool key_block) {
if (account.is_masterchain()) {
return get_file_desc_by_lt(ShardIdFull{masterchainId}, lt, key_block);
}
for (auto it = f.rbegin(); it != f.rend(); it++) {
if (it->second.deleted) {
continue;
}
bool found = false;
for (int i = 0; i < 60; i++) {
auto shard = shard_prefix(account, i);
auto it2 = it->second.first_blocks.find(shard);
if (it2 != it->second.first_blocks.end()) {
if (it2->second.lt <= lt) {
return &it->second;
}
found = true;
} else if (found) {
break;
}
auto &f = get_file_map(PackageId{0, key_block, false});
const FileDescription *result = nullptr;
for (int i = 0; i <= 60; i++) {
const FileDescription *desc = f.get_file_desc_by_lt(shard_prefix(account, i), lt);
if (desc && (!result || result->id < desc->id)) {
result = desc;
} else if (result && (!desc || desc->id < result->id)) {
break;
}
}
return nullptr;
return result;
}
ArchiveManager::FileDescription *ArchiveManager::get_next_file_desc(FileDescription *f) {
auto &m = get_file_map(f->id);
auto it = m.find(f->id);
CHECK(it != m.end());
while (true) {
it++;
if (it == m.end()) {
return nullptr;
} else if (!it->second.deleted) {
return &it->second;
const ArchiveManager::FileDescription *ArchiveManager::get_next_file_desc(const FileDescription *f,
AccountIdPrefixFull shard, bool key_block) {
auto &m = get_file_map(PackageId{0, key_block, false});
const FileDescription *result = nullptr;
for (int i = 0; i <= 60; i++) {
const FileDescription *desc = m.get_next_file_desc(shard_prefix(shard, i), f);
if (desc && (!result || desc->id < result->id)) {
result = desc;
} else if (result && (!desc || result->id < desc->id)) {
break;
}
}
return result;
}
ArchiveManager::FileDescription *ArchiveManager::get_temp_file_desc_by_idx(PackageId idx) {
const ArchiveManager::FileDescription *ArchiveManager::get_temp_file_desc_by_idx(PackageId idx) {
auto it = temp_files_.find(idx);
if (it != temp_files_.end()) {
if (it->second.deleted) {
@ -1257,14 +1202,100 @@ void ArchiveManager::truncate(BlockSeqno masterchain_seqno, ConstBlockHandle han
}
}
bool ArchiveManager::FileDescription::has_account_prefix(AccountIdPrefixFull account_id) const {
for (int i = 0; i < 60; i++) {
auto shard = shard_prefix(account_id, i);
if (first_blocks.count(shard)) {
return true;
}
void ArchiveManager::FileMap::shard_index_add(const FileDescription &desc) {
for (const auto &p : desc.first_blocks) {
ShardIndex &s = shards_[p.first];
s.seqno_index_[p.second.seqno] = &desc;
s.lt_index_[p.second.lt] = &desc;
s.unix_time_index_[p.second.ts] = &desc;
s.packages_index_[desc.id] = &desc;
}
return false;
}
void ArchiveManager::FileMap::shard_index_del(const FileDescription &desc) {
for (const auto &p : desc.first_blocks) {
ShardIndex &s = shards_[p.first];
s.seqno_index_.erase(p.second.seqno);
s.lt_index_.erase(p.second.lt);
s.unix_time_index_.erase(p.second.ts);
s.packages_index_.erase(desc.id);
}
}
void ArchiveManager::FileMap::set_shard_first_block(const FileDescription &desc, ShardIdFull shard,
FileDescription::Desc v) {
ShardIndex &s = shards_[shard];
auto &d = const_cast<FileDescription &>(desc);
auto it = d.first_blocks.find(shard);
if (it != d.first_blocks.end()) {
s.seqno_index_.erase(it->second.seqno);
s.lt_index_.erase(it->second.lt);
s.unix_time_index_.erase(it->second.ts);
}
d.first_blocks[shard] = v;
s.seqno_index_[v.seqno] = &d;
s.lt_index_[v.lt] = &d;
s.unix_time_index_[v.ts] = &d;
s.packages_index_[d.id] = &d;
}
const ArchiveManager::FileDescription *ArchiveManager::FileMap::get_file_desc_by_seqno(ShardIdFull shard,
BlockSeqno seqno) const {
auto it = shards_.find(shard);
if (it == shards_.end()) {
return nullptr;
}
const auto &map = it->second.seqno_index_;
auto it2 = map.upper_bound(seqno);
if (it2 == map.begin()) {
return nullptr;
}
--it2;
return it2->second->deleted ? nullptr : it2->second;
}
const ArchiveManager::FileDescription *ArchiveManager::FileMap::get_file_desc_by_lt(ShardIdFull shard,
LogicalTime lt) const {
auto it = shards_.find(shard);
if (it == shards_.end()) {
return nullptr;
}
const auto &map = it->second.lt_index_;
auto it2 = map.upper_bound(lt);
if (it2 == map.begin()) {
return nullptr;
}
--it2;
return it2->second->deleted ? nullptr : it2->second;
}
const ArchiveManager::FileDescription *ArchiveManager::FileMap::get_file_desc_by_unix_time(ShardIdFull shard,
UnixTime ts) const {
auto it = shards_.find(shard);
if (it == shards_.end()) {
return nullptr;
}
const auto &map = it->second.unix_time_index_;
auto it2 = map.upper_bound(ts);
if (it2 == map.begin()) {
return nullptr;
}
--it2;
return it2->second->deleted ? nullptr : it2->second;
}
const ArchiveManager::FileDescription *ArchiveManager::FileMap::get_next_file_desc(ShardIdFull shard,
const FileDescription *desc) const {
auto it = shards_.find(shard);
if (it == shards_.end()) {
return nullptr;
}
const auto &map = it->second.packages_index_;
auto it2 = desc ? map.upper_bound(desc->id) : map.begin();
if (it2 == map.end()) {
return nullptr;
}
return it2->second->deleted ? nullptr : it2->second;
}
} // namespace validator
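Note: the new FileMap lookups above all follow the same pattern, replacing the previous linear reverse scan over packages: keep a per-shard std::map keyed by first-block seqno/lt/unix time and find the last entry not exceeding the query via upper_bound. A minimal standalone sketch of that pattern; the Desc struct and find_latest_not_above helper are illustrative names, not the actual classes.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>

// Sketch of the "last first-block <= query" lookup used by the per-shard
// indexes above (seqno_index_, lt_index_, unix_time_index_).
struct Desc {
  std::uint32_t package_id;
  bool deleted = false;
};

std::optional<Desc> find_latest_not_above(const std::map<std::uint32_t, Desc> &index,
                                          std::uint32_t seqno) {
  // upper_bound returns the first entry with key > seqno ...
  auto it = index.upper_bound(seqno);
  if (it == index.begin()) {
    return std::nullopt;  // every indexed first block starts after `seqno`
  }
  --it;  // ... so the previous entry is the last one with key <= seqno
  if (it->second.deleted) {
    return std::nullopt;  // mirrors the `deleted ? nullptr : ...` check above
  }
  return it->second;
}

int main() {
  std::map<std::uint32_t, Desc> seqno_index{{100, {1}}, {200, {2}}, {300, {3}}};
  auto d = find_latest_not_above(seqno_index, 250);
  if (d) {
    std::cout << "package " << d->package_id << "\n";  // prints "package 2"
  }
}
```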


@ -71,7 +71,6 @@ class ArchiveManager : public td::actor::Actor {
void start_up() override;
void begin_transaction();
void commit_transaction();
void set_async_mode(bool mode, td::Promise<td::Unit> promise);
@ -94,26 +93,83 @@ class ArchiveManager : public td::actor::Actor {
auto file_actor_id() const {
return file.get();
}
void clear_actor_id() {
file.reset();
}
bool has_account_prefix(AccountIdPrefixFull account_id) const;
PackageId id;
bool deleted;
mutable bool deleted;
std::map<ShardIdFull, Desc> first_blocks;
td::actor::ActorOwn<ArchiveSlice> file;
mutable td::actor::ActorOwn<ArchiveSlice> file;
};
std::map<PackageId, FileDescription> files_;
std::map<PackageId, FileDescription> key_files_;
std::map<PackageId, FileDescription> temp_files_;
class FileMap {
public:
std::map<PackageId, FileDescription>::const_iterator begin() const {
return files_.cbegin();
}
std::map<PackageId, FileDescription>::const_iterator end() const {
return files_.cend();
}
std::map<PackageId, FileDescription>::const_reverse_iterator rbegin() const {
return files_.crbegin();
}
std::map<PackageId, FileDescription>::const_reverse_iterator rend() const {
return files_.crend();
}
std::map<PackageId, FileDescription>::const_iterator find(PackageId x) const {
return files_.find(x);
}
size_t count(const PackageId &x) const {
return files_.count(x);
}
size_t size() const {
return files_.size();
}
std::map<PackageId, FileDescription>::const_iterator lower_bound(const PackageId &x) const {
return files_.lower_bound(x);
}
std::map<PackageId, FileDescription>::const_iterator upper_bound(const PackageId &x) const {
return files_.upper_bound(x);
}
void clear() {
files_.clear();
shards_.clear();
}
const FileDescription &emplace(const PackageId &id, FileDescription desc) {
auto it = files_.emplace(id, std::move(desc));
if (it.second) {
shard_index_add(it.first->second);
}
return it.first->second;
}
void erase(std::map<PackageId, FileDescription>::const_iterator it) {
shard_index_del(it->second);
files_.erase(it);
}
void set_shard_first_block(const FileDescription &desc, ShardIdFull shard, FileDescription::Desc v);
const FileDescription *get_file_desc_by_seqno(ShardIdFull shard, BlockSeqno seqno) const;
const FileDescription *get_file_desc_by_lt(ShardIdFull shard, LogicalTime lt) const;
const FileDescription *get_file_desc_by_unix_time(ShardIdFull shard, UnixTime ts) const;
const FileDescription *get_next_file_desc(ShardIdFull shard, const FileDescription *desc) const;
private:
std::map<PackageId, FileDescription> files_;
struct ShardIndex {
std::map<BlockSeqno, const FileDescription *> seqno_index_;
std::map<LogicalTime, const FileDescription *> lt_index_;
std::map<UnixTime, const FileDescription *> unix_time_index_;
std::map<PackageId, const FileDescription *> packages_index_;
};
std::map<ShardIdFull, ShardIndex> shards_;
void shard_index_add(const FileDescription &desc);
void shard_index_del(const FileDescription &desc);
};
FileMap files_, key_files_, temp_files_;
BlockSeqno finalized_up_to_{0};
bool async_mode_ = false;
bool huge_transaction_started_ = false;
td::uint32 huge_transaction_size_ = 0;
auto &get_file_map(const PackageId &p) {
FileMap &get_file_map(const PackageId &p) {
return p.key ? key_files_ : p.temp ? temp_files_ : files_;
}
@ -126,18 +182,19 @@ class ArchiveManager : public td::actor::Actor {
void get_handle_finish(BlockHandle handle, td::Promise<BlockHandle> promise);
void get_file_short_cont(FileReference ref_id, PackageId idx, td::Promise<td::BufferSlice> promise);
FileDescription *get_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno, UnixTime ts, LogicalTime lt,
bool force);
FileDescription *add_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno, UnixTime ts, LogicalTime lt);
void update_desc(FileDescription &desc, ShardIdFull shard, BlockSeqno seqno, UnixTime ts, LogicalTime lt);
FileDescription *get_file_desc_by_seqno(ShardIdFull shard, BlockSeqno seqno, bool key_block);
FileDescription *get_file_desc_by_lt(ShardIdFull shard, LogicalTime lt, bool key_block);
FileDescription *get_file_desc_by_unix_time(ShardIdFull shard, UnixTime ts, bool key_block);
FileDescription *get_file_desc_by_seqno(AccountIdPrefixFull shard, BlockSeqno seqno, bool key_block);
FileDescription *get_file_desc_by_lt(AccountIdPrefixFull shard, LogicalTime lt, bool key_block);
FileDescription *get_file_desc_by_unix_time(AccountIdPrefixFull shard, UnixTime ts, bool key_block);
FileDescription *get_next_file_desc(FileDescription *f);
FileDescription *get_temp_file_desc_by_idx(PackageId idx);
const FileDescription *get_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno, UnixTime ts, LogicalTime lt,
bool force);
const FileDescription *add_file_desc(ShardIdFull shard, PackageId id, BlockSeqno seqno, UnixTime ts, LogicalTime lt);
void update_desc(FileMap &f, const FileDescription &desc, ShardIdFull shard, BlockSeqno seqno, UnixTime ts,
LogicalTime lt);
const FileDescription *get_file_desc_by_seqno(ShardIdFull shard, BlockSeqno seqno, bool key_block);
const FileDescription *get_file_desc_by_lt(ShardIdFull shard, LogicalTime lt, bool key_block);
const FileDescription *get_file_desc_by_unix_time(ShardIdFull shard, UnixTime ts, bool key_block);
const FileDescription *get_file_desc_by_seqno(AccountIdPrefixFull shard, BlockSeqno seqno, bool key_block);
const FileDescription *get_file_desc_by_lt(AccountIdPrefixFull shard, LogicalTime lt, bool key_block);
const FileDescription *get_file_desc_by_unix_time(AccountIdPrefixFull shard, UnixTime ts, bool key_block);
const FileDescription *get_next_file_desc(const FileDescription *f, AccountIdPrefixFull shard, bool key_block);
const FileDescription *get_temp_file_desc_by_idx(PackageId idx);
PackageId get_max_temp_file_desc_idx();
PackageId get_prev_temp_file_desc_idx(PackageId id);
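Note: FileMap wraps the former plain std::map<PackageId, FileDescription> so that every insert and erase also updates the per-shard secondary indexes. A small sketch of that wrapper idea under assumed names (Registry, by_label_), not the actual types.

```cpp
#include <iostream>
#include <map>
#include <string>

// Every mutation of the primary map goes through the wrapper, so the
// secondary index cannot drift out of sync.
class Registry {
 public:
  const std::string &emplace(int id, std::string label) {
    auto res = records_.emplace(id, std::move(label));
    if (res.second) {
      by_label_[res.first->second] = &res.first->second;  // keep index in sync
    }
    return res.first->second;
  }
  void erase(int id) {
    auto it = records_.find(id);
    if (it == records_.end()) return;
    by_label_.erase(it->second);  // drop the index entry first
    records_.erase(it);
  }
  const std::string *find_by_label(const std::string &label) const {
    auto it = by_label_.find(label);
    return it == by_label_.end() ? nullptr : it->second;
  }

 private:
  std::map<int, std::string> records_;                   // primary storage
  std::map<std::string, const std::string *> by_label_;  // secondary index
};

int main() {
  Registry r;
  r.emplace(1, "alpha");
  r.emplace(2, "beta");
  r.erase(1);
  std::cout << (r.find_by_label("alpha") ? "found" : "gone") << "\n";  // "gone"
}
```

Routing all mutations through the wrapper is what lets the manager hand out const FileDescription pointers while keeping seqno_index_, lt_index_, unix_time_index_ and packages_index_ consistent.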


@ -32,6 +32,7 @@
#include "vm/cells/MerkleUpdate.h"
#include <map>
#include <queue>
#include "common/global-version.h"
namespace ton {
@ -40,7 +41,7 @@ using td::Ref;
class Collator final : public td::actor::Actor {
static constexpr int supported_version() {
return 3;
return SUPPORTED_VERSION;
}
static constexpr long long supported_capabilities() {
return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue;
@ -72,7 +73,7 @@ class Collator final : public td::actor::Actor {
Ref<ValidatorSet> validator_set_;
td::actor::ActorId<ValidatorManager> manager;
td::Timestamp timeout;
td::Timestamp soft_timeout_, medium_timeout_;
td::Timestamp queue_cleanup_timeout_, soft_timeout_, medium_timeout_;
td::Promise<BlockCandidate> main_promise;
ton::BlockSeqno last_block_seqno{0};
ton::BlockSeqno prev_mc_block_seqno{0};


@ -66,6 +66,8 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_mastercha
, validator_set_(std::move(validator_set))
, manager(manager)
, timeout(timeout)
// default timeout is 10 seconds, declared in validator/validator-group.cpp:generate_block_candidate:run_collate_query
, queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 5.0))
, soft_timeout_(td::Timestamp::at(timeout.at() - 3.0))
, medium_timeout_(td::Timestamp::at(timeout.at() - 1.5))
, main_promise(std::move(promise))
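Note: the added queue_cleanup_timeout_ sits alongside the existing soft and medium timeouts, all derived from the single collation deadline (roughly 5.0 s, 3.0 s and 1.5 s before it). A minimal sketch of that staggered-deadline idea, assuming std::chrono in place of td::Timestamp; the names are illustrative.

```cpp
#include <chrono>
#include <iostream>

// All intermediate deadlines are derived from the single collation deadline,
// so changing the overall timeout shifts every stage consistently.
struct CollationDeadlines {
  std::chrono::steady_clock::time_point queue_cleanup;  // stop queue cleanup early
  std::chrono::steady_clock::time_point soft;           // stop adding transactions
  std::chrono::steady_clock::time_point medium;         // wrap up the block
  std::chrono::steady_clock::time_point hard;           // overall limit
};

CollationDeadlines make_deadlines(std::chrono::steady_clock::time_point hard) {
  using namespace std::chrono_literals;
  return CollationDeadlines{hard - 5000ms, hard - 3000ms, hard - 1500ms, hard};
}

int main() {
  auto deadlines = make_deadlines(std::chrono::steady_clock::now() + std::chrono::seconds(10));
  // A long-running stage checks its own deadline, not the hard one:
  bool cleanup_expired = std::chrono::steady_clock::now() >= deadlines.queue_cleanup;
  std::cout << std::boolalpha << cleanup_expired << "\n";  // false right after start
}
```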
@ -535,6 +537,7 @@ bool Collator::unpack_last_mc_state() {
mc_state_root,
block::ConfigInfo::needShardHashes | block::ConfigInfo::needLibraries | block::ConfigInfo::needValidatorSet |
block::ConfigInfo::needWorkchainInfo | block::ConfigInfo::needCapabilities |
block::ConfigInfo::needPrevBlocks |
(is_masterchain() ? block::ConfigInfo::needAccountsRoot | block::ConfigInfo::needSpecialSmc : 0));
if (res.is_error()) {
td::Status err = res.move_as_error();
@ -1424,7 +1427,13 @@ bool Collator::import_new_shard_top_blocks() {
}
LOG(INFO) << "total fees_imported = " << value_flow_.fees_imported.to_str()
<< " ; out of them, total fees_created = " << import_created_.to_str();
value_flow_.fees_collected += value_flow_.fees_imported;
block::CurrencyCollection burned =
config_->get_burning_config().calculate_burned_fees(value_flow_.fees_imported - import_created_);
if (!burned.is_valid()) {
return fatal_error("cannot calculate amount of burned imported fees");
}
value_flow_.burned += burned;
value_flow_.fees_collected += value_flow_.fees_imported - burned;
return true;
}
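Note: the hunk above splits imported shard fees into a burned part and a collected part: only fees_imported - import_created_ is passed to calculate_burned_fees, and the burned amount is subtracted from what validators collect. An arithmetic sketch with plain integers and an assumed 50% burn rate; the real rate comes from the burning config in the masterchain configuration.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  std::int64_t fees_imported = 1'000'000;  // fees imported from shard blocks
  std::int64_t import_created = 200'000;   // part of them that is block creation reward

  // Only the (imported - created) part is subject to burning.
  std::int64_t burnable = fees_imported - import_created;
  std::int64_t burned = burnable / 2;  // assumed 50% burn rate, for illustration only

  std::int64_t value_flow_burned = burned;
  std::int64_t fees_collected = fees_imported - burned;  // what validators still collect

  std::cout << "burned=" << value_flow_burned
            << " fees_collected=" << fees_collected << "\n";
  // burned=400000 fees_collected=600000
}
```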
@ -1847,6 +1856,11 @@ bool Collator::out_msg_queue_cleanup() {
auto res = out_msg_queue_->filter([&](vm::CellSlice& cs, td::ConstBitPtr key, int n) -> int {
assert(n == 352);
// LOG(DEBUG) << "key is " << key.to_hex(n);
if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) {
LOG(WARNING) << "cleaning up outbound queue takes too long, ending";
outq_cleanup_partial_ = true;
return (1 << 30) + 1; // retain all remaining outbound queue entries including this one without processing
}
if (block_full_) {
LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially";
outq_cleanup_partial_ = true;
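Note: the new check makes the outbound-queue cleanup respect its own deadline: once queue_cleanup_timeout_ has passed, the filter retains every remaining entry and records that the cleanup was only partial. A generic sketch of that deadline-guarded early exit, not the actual td/vm dictionary filter API; names are illustrative.

```cpp
#include <chrono>
#include <cstddef>
#include <iostream>
#include <vector>

// Give up cleanly when the deadline passes instead of blowing through the
// block generation deadline; the caller can see the cleanup was partial.
int main() {
  using clock = std::chrono::steady_clock;
  const auto deadline = clock::now() + std::chrono::milliseconds(50);

  std::vector<int> queue(1'000'000, 1);
  bool cleanup_partial = false;
  std::size_t processed = 0;

  for (int &entry : queue) {
    if (clock::now() >= deadline) {
      cleanup_partial = true;  // retain all remaining entries without processing
      break;
    }
    entry = 0;  // mark the entry as handled (stands in for removing it)
    ++processed;
  }

  std::cout << "processed " << processed
            << (cleanup_partial ? " (partial)" : " (complete)") << "\n";
}
```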
@ -2234,6 +2248,7 @@ Ref<vm::Cell> Collator::create_ordinary_transaction(Ref<vm::Cell> msg_root) {
register_new_msgs(*trans);
update_max_lt(acc->last_trans_end_lt_);
value_flow_.burned += trans->blackhole_burned;
return trans_root;
}
@ -2307,7 +2322,8 @@ td::Result<std::unique_ptr<block::transaction::Transaction>> Collator::impl_crea
return td::Status::Error(
-669, "cannot create action phase of a new transaction for smart contract "s + acc->addr.to_hex());
}
if (trans->bounce_enabled && (!trans->compute_phase->success || trans->action_phase->state_exceeds_limits) &&
if (trans->bounce_enabled &&
(!trans->compute_phase->success || trans->action_phase->state_exceeds_limits || trans->action_phase->bounce) &&
!trans->prepare_bounce_phase(*action_phase_cfg)) {
return td::Status::Error(
-669, "cannot create bounce phase of a new transaction for smart contract "s + acc->addr.to_hex());
@ -2590,7 +2606,7 @@ bool Collator::precheck_inbound_message(Ref<vm::CellSlice> enq_msg, ton::Logical
return false;
}
if (!block::tlb::t_MsgEnvelope.validate_ref(msg_env)) {
LOG(ERROR) << "inbound internal MsgEnvelope is invalid according to automated checks";
LOG(ERROR) << "inbound internal MsgEnvelope is invalid according to hand-written checks";
return false;
}
return true;
@ -2622,6 +2638,10 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
"its contents";
return false;
}
if (!block::tlb::validate_message_libs(env.msg)) {
LOG(ERROR) << "inbound internal message has invalid StateInit";
return false;
}
// 2.0. update last_proc_int_msg
if (!update_last_proc_int_msg(std::pair<ton::LogicalTime, ton::Bits256>(lt, env.msg->get_hash().bits()))) {
return fatal_error("processing a message AFTER a newer message has been processed");
@ -2997,6 +3017,7 @@ bool Collator::process_new_messages(bool enqueue_only) {
while (!new_msgs.empty()) {
block::NewOutMsg msg = new_msgs.top();
new_msgs.pop();
block_limit_status_->extra_out_msgs--;
if (block_full_ && !enqueue_only) {
LOG(INFO) << "BLOCK FULL, enqueue all remaining new messages";
enqueue_only = true;
@ -3018,6 +3039,7 @@ void Collator::register_new_msg(block::NewOutMsg new_msg) {
min_new_msg_lt = new_msg.lt;
}
new_msgs.push(std::move(new_msg));
block_limit_status_->extra_out_msgs++;
}
void Collator::register_new_msgs(block::transaction::Transaction& trans) {
@ -3767,7 +3789,16 @@ bool Collator::compute_total_balance() {
LOG(ERROR) << "cannot unpack CurrencyCollection from the root of OutMsgDescr";
return false;
}
value_flow_.fees_collected += new_transaction_fees + new_import_fees;
block::CurrencyCollection total_fees = new_transaction_fees + new_import_fees;
value_flow_.fees_collected += total_fees;
if (is_masterchain()) {
block::CurrencyCollection burned = config_->get_burning_config().calculate_burned_fees(total_fees);
if (!burned.is_valid()) {
return fatal_error("cannot calculate amount of burned masterchain fees");
}
value_flow_.fees_collected -= burned;
value_flow_.burned += burned;
}
// 3. compute total_validator_fees
total_validator_fees_ += value_flow_.fees_collected;
total_validator_fees_ -= value_flow_.recovered;
@ -3916,7 +3947,7 @@ bool Collator::create_block() {
}
if (verify >= 1) {
LOG(INFO) << "verifying new Block";
if (!block::gen::t_Block.validate_ref(1000000, new_block)) {
if (!block::gen::t_Block.validate_ref(10000000, new_block)) {
return fatal_error("new Block failed to pass automatic validity tests");
}
}
@ -4077,6 +4108,18 @@ bool Collator::create_block_candidate() {
ton::BlockIdExt{ton::BlockId{shard_, new_block_seqno}, new_block->get_hash().bits(),
block::compute_file_hash(blk_slice.as_slice())},
block::compute_file_hash(cdata_slice.as_slice()), blk_slice.clone(), cdata_slice.clone());
// 3.1 check block and collated data size
auto consensus_config = config_->get_consensus_config();
if (block_candidate->data.size() > consensus_config.max_block_size) {
return fatal_error(PSTRING() << "block size (" << block_candidate->data.size()
<< ") exceeds the limit in consensus config (" << consensus_config.max_block_size
<< ")");
}
if (block_candidate->collated_data.size() > consensus_config.max_collated_data_size) {
return fatal_error(PSTRING() << "collated data size (" << block_candidate->collated_data.size()
<< ") exceeds the limit in consensus config ("
<< consensus_config.max_collated_data_size << ")");
}
// 4. save block candidate
LOG(INFO) << "saving new BlockCandidate";
td::actor::send_closure_later(manager, &ValidatorManager::set_block_candidate, block_candidate->id,
@ -4139,6 +4182,9 @@ td::Result<bool> Collator::register_external_message_cell(Ref<vm::Cell> ext_msg,
if (!block::tlb::t_Message.validate_ref(256, ext_msg)) {
return td::Status::Error("external message is not a (Message Any) according to hand-written checks");
}
if (!block::tlb::validate_message_libs(ext_msg)) {
return td::Status::Error("external message has invalid libs in StateInit");
}
block::gen::CommonMsgInfo::Record_ext_in_msg_info info;
if (!tlb::unpack_cell_inexact(ext_msg, info)) {
return td::Status::Error("cannot unpack external message header");


@ -1192,6 +1192,7 @@ void LiteQuery::finish_getAccountState(td::BufferSlice shard_proof) {
return;
}
auto rconfig = config.move_as_ok();
rconfig->set_block_id_ext(mc_state_->get_block_id());
acc_state_promise_.set_value(std::make_tuple(
std::move(acc_csr), sstate.gen_utime, sstate.gen_lt, std::move(rconfig)
));
@ -1687,13 +1688,23 @@ void LiteQuery::continue_getConfigParams(int mode, std::vector<int> param_list)
}
}
auto res = keyblk ? block::Config::extract_from_key_block(mpb.root(), mode)
: block::Config::extract_from_state(mpb.root(), mode);
if (res.is_error()) {
fatal_error(res.move_as_error());
return;
std::unique_ptr<block::Config> cfg;
if (keyblk || !(mode & block::ConfigInfo::needPrevBlocks)) {
auto res = keyblk ? block::Config::extract_from_key_block(mpb.root(), mode)
: block::Config::extract_from_state(mpb.root(), mode);
if (res.is_error()) {
fatal_error(res.move_as_error());
return;
}
cfg = res.move_as_ok();
} else {
auto res = block::ConfigInfo::extract_config(mpb.root(), mode);
if (res.is_error()) {
fatal_error(res.move_as_error());
return;
}
cfg = res.move_as_ok();
}
auto cfg = res.move_as_ok();
if (!cfg) {
fatal_error("cannot extract configuration from last mc state");
return;
@ -1706,6 +1717,9 @@ void LiteQuery::continue_getConfigParams(int mode, std::vector<int> param_list)
visit(cfg->get_config_param(i));
}
}
if (!keyblk && mode & block::ConfigInfo::needPrevBlocks) {
((block::ConfigInfo*)cfg.get())->get_prev_blocks_info();
}
} catch (vm::VmError& err) {
fatal_error("error while traversing required configuration parameters: "s + err.get_msg());
return;


@ -62,12 +62,10 @@ td::Status ShardTopBlockDescrQ::unpack_one_proof(BlockIdExt& cur_id, Ref<vm::Cel
block::gen::Block::Record blk;
block::gen::BlockInfo::Record info;
block::gen::BlockExtra::Record extra;
block::gen::ValueFlow::Record flow;
block::CurrencyCollection fees_collected, funds_created;
block::ValueFlow flow;
if (!(tlb::unpack_cell(virt_root, blk) && tlb::unpack_cell(blk.info, info) && !info.version &&
block::gen::t_ValueFlow.force_validate_ref(blk.value_flow) && tlb::unpack_cell(blk.value_flow, flow) &&
/*tlb::unpack_cell(blk.extra, extra) &&*/ fees_collected.unpack(flow.fees_collected) &&
funds_created.unpack(flow.r2.created))) {
flow.unpack(vm::load_cell_slice_ref(blk.value_flow)))
/*&& tlb::unpack_cell(blk.extra, extra)*/) {
return td::Status::Error(-666, "cannot unpack block header in link for block "s + cur_id.to_str());
}
// remove this "try ... catch ..." later and uncomment tlb::unpack_cell(blk.extra, extra) in the previous condition
@ -131,7 +129,7 @@ td::Status ShardTopBlockDescrQ::unpack_one_proof(BlockIdExt& cur_id, Ref<vm::Cel
}
chain_mc_blk_ids_.push_back(mc_blkid);
chain_blk_ids_.push_back(cur_id);
chain_fees_.emplace_back(std::move(fees_collected), std::move(funds_created));
chain_fees_.emplace_back(std::move(flow.fees_collected), std::move(flow.created));
creators_.push_back(extra.created_by);
if (!is_head) {
if (info.after_split || info.after_merge) {


@ -682,7 +682,7 @@ bool ValidateQuery::try_unpack_mc_state() {
mc_state_root_,
block::ConfigInfo::needShardHashes | block::ConfigInfo::needLibraries | block::ConfigInfo::needValidatorSet |
block::ConfigInfo::needWorkchainInfo | block::ConfigInfo::needStateExtraRoot |
block::ConfigInfo::needCapabilities |
block::ConfigInfo::needCapabilities | block::ConfigInfo::needPrevBlocks |
(is_masterchain() ? block::ConfigInfo::needAccountsRoot | block::ConfigInfo::needSpecialSmc : 0));
if (res.is_error()) {
return fatal_error(-666, "cannot extract configuration from reference masterchain state "s + mc_blkid_.to_str() +
@ -785,10 +785,20 @@ bool ValidateQuery::fetch_config_params() {
storage_phase_cfg_.delete_due_limit)) {
return fatal_error("cannot unpack current gas prices and limits from masterchain configuration");
}
storage_phase_cfg_.enable_due_payment = config_->get_global_version() >= 4;
compute_phase_cfg_.block_rand_seed = rand_seed_;
compute_phase_cfg_.libraries = std::make_unique<vm::Dictionary>(config_->get_libraries_root(), 256);
compute_phase_cfg_.max_vm_data_depth = size_limits.max_vm_data_depth;
compute_phase_cfg_.global_config = config_->get_root_cell();
compute_phase_cfg_.global_version = config_->get_global_version();
if (compute_phase_cfg_.global_version >= 4) {
auto prev_blocks_info = config_->get_prev_blocks_info();
if (prev_blocks_info.is_error()) {
return fatal_error(prev_blocks_info.move_as_error_prefix(
"cannot fetch prev blocks info from masterchain configuration: "));
}
compute_phase_cfg_.prev_blocks_info = prev_blocks_info.move_as_ok();
}
compute_phase_cfg_.suspended_addresses = config_->get_suspended_addresses(now_);
}
{
@ -811,6 +821,9 @@ bool ValidateQuery::fetch_config_params() {
action_phase_cfg_.workchains = &config_->get_workchain_list();
action_phase_cfg_.bounce_msg_body = (config_->has_capability(ton::capBounceMsgBody) ? 256 : 0);
action_phase_cfg_.size_limits = size_limits;
action_phase_cfg_.action_fine_enabled = config_->get_global_version() >= 4;
action_phase_cfg_.bounce_on_fail_enabled = config_->get_global_version() >= 4;
action_phase_cfg_.mc_blackhole_addr = config_->get_burning_config().blackhole_addr;
}
{
// fetch block_grams_created
@ -2193,13 +2206,13 @@ bool ValidateQuery::unpack_block_data() {
auto outmsg_cs = vm::load_cell_slice_ref(std::move(extra.out_msg_descr));
// run some hand-written checks from block::tlb::
// (automatic tests from block::gen:: have been already run for the entire block)
if (!block::tlb::t_InMsgDescr.validate_upto(1000000, *inmsg_cs)) {
if (!block::tlb::t_InMsgDescr.validate_upto(10000000, *inmsg_cs)) {
return reject_query("InMsgDescr of the new block failed to pass handwritten validity tests");
}
if (!block::tlb::t_OutMsgDescr.validate_upto(1000000, *outmsg_cs)) {
if (!block::tlb::t_OutMsgDescr.validate_upto(10000000, *outmsg_cs)) {
return reject_query("OutMsgDescr of the new block failed to pass handwritten validity tests");
}
if (!block::tlb::t_ShardAccountBlocks.validate_ref(1000000, extra.account_blocks)) {
if (!block::tlb::t_ShardAccountBlocks.validate_ref(10000000, extra.account_blocks)) {
return reject_query("ShardAccountBlocks of the new block failed to pass handwritten validity tests");
}
in_msg_dict_ = std::make_unique<vm::AugmentedDictionary>(std::move(inmsg_cs), 256, block::tlb::aug_InMsgDescr);
@ -2243,6 +2256,11 @@ bool ValidateQuery::unpack_precheck_value_flow(Ref<vm::Cell> value_flow_root) {
return reject_query("ValueFlow of block "s + id_.to_str() +
" is invalid (non-zero recovered value in a non-masterchain block)");
}
if (!is_masterchain() && !value_flow_.burned.is_zero()) {
LOG(INFO) << "invalid value flow: " << os.str();
return reject_query("ValueFlow of block "s + id_.to_str() +
" is invalid (non-zero burned value in a non-masterchain block)");
}
if (!value_flow_.recovered.is_zero() && recover_create_msg_.is_null()) {
return reject_query("ValueFlow of block "s + id_.to_str() +
" has a non-zero recovered fees value, but there is no recovery InMsg");
@ -2325,15 +2343,10 @@ bool ValidateQuery::unpack_precheck_value_flow(Ref<vm::Cell> value_flow_root) {
"cannot unpack CurrencyCollection with total transaction fees from the augmentation of the ShardAccountBlocks "
"dictionary");
}
auto expected_fees = value_flow_.fees_imported + value_flow_.created + transaction_fees_ + import_fees_;
if (value_flow_.fees_collected != expected_fees) {
return reject_query(PSTRING() << "ValueFlow for " << id_.to_str() << " declares fees_collected="
<< value_flow_.fees_collected.to_str() << " but the total message import fees are "
<< import_fees_ << ", the total transaction fees are " << transaction_fees_.to_str()
<< ", creation fee for this block is " << value_flow_.created.to_str()
<< " and the total imported fees from shards are "
<< value_flow_.fees_imported.to_str() << " with a total of "
<< expected_fees.to_str());
if (is_masterchain()) {
auto x = config_->get_burning_config().calculate_burned_fees(transaction_fees_ + import_fees_);
fees_burned_ += x;
total_burned_ += x;
}
return true;
}
@ -4588,7 +4601,8 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT
return reject_query(PSTRING() << "cannot re-create action phase of transaction " << lt << " for smart contract "
<< addr.to_hex());
}
if (trs->bounce_enabled && (!trs->compute_phase->success || trs->action_phase->state_exceeds_limits) &&
if (trs->bounce_enabled &&
(!trs->compute_phase->success || trs->action_phase->state_exceeds_limits || trs->action_phase->bounce) &&
!trs->prepare_bounce_phase(action_phase_cfg_)) {
return reject_query(PSTRING() << "cannot re-create bounce phase of transaction " << lt << " for smart contract "
<< addr.to_hex());
@ -4646,6 +4660,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT
<< "transaction " << lt << " of " << addr.to_hex()
<< " is invalid: it has produced a set of outbound messages different from that listed in the transaction");
}
total_burned_ += trs->blackhole_burned;
// check new balance and value flow
auto new_balance = account.get_balance();
block::CurrencyCollection total_fees;
@ -4653,12 +4668,14 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT
return reject_query(PSTRING() << "transaction " << lt << " of " << addr.to_hex()
<< " has an invalid total_fees value");
}
if (old_balance + money_imported != new_balance + money_exported + total_fees) {
return reject_query(PSTRING() << "transaction " << lt << " of " << addr.to_hex()
<< " violates the currency flow condition: old balance=" << old_balance.to_str()
<< " + imported=" << money_imported.to_str() << " does not equal new balance="
<< new_balance.to_str() << " + exported=" << money_exported.to_str()
<< " + total_fees=" << total_fees.to_str());
if (old_balance + money_imported != new_balance + money_exported + total_fees + trs->blackhole_burned) {
return reject_query(
PSTRING() << "transaction " << lt << " of " << addr.to_hex()
<< " violates the currency flow condition: old balance=" << old_balance.to_str()
<< " + imported=" << money_imported.to_str() << " does not equal new balance=" << new_balance.to_str()
<< " + exported=" << money_exported.to_str() << " + total_fees=" << total_fees.to_str()
<< (trs->blackhole_burned.is_zero() ? ""
: PSTRING() << " burned=" << trs->blackhole_burned.to_str()));
}
return true;
}
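Note: the extended currency-flow condition above now accounts for value destroyed by sends to the blackhole address: old balance plus imported value must equal new balance plus exported value, total fees and blackhole_burned. A quick arithmetic sketch with made-up integer amounts in place of CurrencyCollection.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  std::int64_t old_balance      = 5'000'000;
  std::int64_t money_imported   = 1'000'000;  // value of the inbound message
  std::int64_t money_exported   = 700'000;    // value of outbound messages
  std::int64_t total_fees       = 250'000;    // total_fees declared by the transaction
  std::int64_t blackhole_burned = 50'000;     // value sent to the blackhole address

  std::int64_t new_balance = old_balance + money_imported
                           - money_exported - total_fees - blackhole_burned;

  // The validator recomputes both sides and rejects the block on mismatch.
  bool ok = (old_balance + money_imported ==
             new_balance + money_exported + total_fees + blackhole_burned);
  std::cout << "new_balance=" << new_balance << " ok=" << std::boolalpha << ok << "\n";
  // new_balance=5000000 ok=true
}
```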
@ -5529,6 +5546,9 @@ bool ValidateQuery::check_mc_block_extra() {
return reject_query("invalid fees_imported in value flow: declared "s + value_flow_.fees_imported.to_str() +
", correct value is " + fees_imported.to_str());
}
auto x = config_->get_burning_config().calculate_burned_fees(fees_imported - import_created_);
total_burned_ += x;
fees_burned_ += x;
// ^[ prev_blk_signatures:(HashmapE 16 CryptoSignaturePair)
if (prev_signatures_.not_null() && id_.seqno() == 1) {
return reject_query("block contains non-empty signature set for the zero state of the masterchain");
@ -5546,6 +5566,26 @@ bool ValidateQuery::check_mc_block_extra() {
return true;
}
bool ValidateQuery::postcheck_value_flow() {
auto expected_fees =
value_flow_.fees_imported + value_flow_.created + transaction_fees_ + import_fees_ - fees_burned_;
if (value_flow_.fees_collected != expected_fees) {
return reject_query(PSTRING() << "ValueFlow for " << id_.to_str() << " declares fees_collected="
<< value_flow_.fees_collected.to_str() << " but the total message import fees are "
<< import_fees_ << ", the total transaction fees are " << transaction_fees_.to_str()
<< ", creation fee for this block is " << value_flow_.created.to_str()
<< ", the total imported fees from shards are " << value_flow_.fees_imported.to_str()
<< " and the burned fees are " << fees_burned_.to_str()
<< " with a total of " << expected_fees.to_str());
}
if (total_burned_ != value_flow_.burned) {
return reject_query(PSTRING() << "invalid burned in value flow: " << id_.to_str() << " declared "
<< value_flow_.burned.to_str() << ", correct value is "
<< total_burned_.to_str());
}
return true;
}
Ref<vm::Cell> ValidateQuery::get_virt_state_root(td::Bits256 block_root_hash) {
auto it = virt_roots_.find(block_root_hash);
if (it == virt_roots_.end()) {
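Note: postcheck_value_flow() moves the fees_collected consistency check to after all per-transaction and masterchain checks, so that fees_burned_ and total_burned_ are fully accumulated before comparing. A small sketch of the two invariants it enforces, with made-up integer values standing in for CurrencyCollection.

```cpp
#include <cstdint>
#include <iostream>

int main() {
  // Components accumulated while checking the block:
  std::int64_t fees_imported    = 600'000;  // from shard top blocks
  std::int64_t created          = 100'000;  // block creation reward
  std::int64_t transaction_fees = 250'000;  // sum over ShardAccountBlocks
  std::int64_t import_fees      = 50'000;   // from InMsgDescr
  std::int64_t fees_burned      = 300'000;  // burned part of the above

  // Values the candidate block declares in its ValueFlow:
  std::int64_t declared_fees_collected = 700'000;
  std::int64_t declared_burned         = 300'000;

  std::int64_t expected_fees =
      fees_imported + created + transaction_fees + import_fees - fees_burned;

  bool ok = (declared_fees_collected == expected_fees) &&
            (declared_burned == fees_burned /* total_burned_ in the real check */);
  std::cout << "expected=" << expected_fees << " ok=" << std::boolalpha << ok << "\n";
  // expected=700000 ok=true
}
```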
@ -5607,7 +5647,7 @@ bool ValidateQuery::try_validate() {
}
}
LOG(INFO) << "running automated validity checks for block candidate " << id_.to_str();
if (!block::gen::t_Block.validate_ref(1000000, block_root_)) {
if (!block::gen::t_Block.validate_ref(10000000, block_root_)) {
return reject_query("block "s + id_.to_str() + " failed to pass automated validity checks");
}
if (!fix_all_processed_upto()) {
@ -5640,9 +5680,10 @@ bool ValidateQuery::try_validate() {
if (!check_in_queue()) {
return reject_query("cannot check inbound message queues");
}
if (!check_delivered_dequeued()) {
// Excessive check: validity of message in queue is checked elsewhere
/*if (!check_delivered_dequeued()) {
return reject_query("cannot check delivery status of all outbound messages");
}
}*/
if (!check_transactions()) {
return reject_query("invalid collection of account transactions in ShardAccountBlocks");
}
@ -5664,6 +5705,9 @@ bool ValidateQuery::try_validate() {
if (!check_mc_state_extra()) {
return reject_query("new McStateExtra is invalid");
}
if (!postcheck_value_flow()) {
return reject_query("new ValueFlow is invalid");
}
} catch (vm::VmError& err) {
return fatal_error(-666, err.get_msg());
} catch (vm::VmVirtError& err) {


@ -28,6 +28,7 @@
#include <vector>
#include <string>
#include <map>
#include "common/global-version.h"
namespace ton {
@ -108,7 +109,7 @@ inline ErrorCtxSet ErrorCtx::set_guard(std::vector<std::string> str_list) {
class ValidateQuery : public td::actor::Actor {
static constexpr int supported_version() {
return 3;
return SUPPORTED_VERSION;
}
static constexpr long long supported_capabilities() {
return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue;
@ -217,7 +218,7 @@ class ValidateQuery : public td::actor::Actor {
std::unique_ptr<vm::AugmentedDictionary> in_msg_dict_, out_msg_dict_, account_blocks_dict_;
block::ValueFlow value_flow_;
block::CurrencyCollection import_created_, transaction_fees_;
block::CurrencyCollection import_created_, transaction_fees_, total_burned_{0}, fees_burned_{0};
td::RefInt256 import_fees_;
ton::LogicalTime proc_lt_{0}, claimed_proc_lt_{0}, min_enq_lt_{~0ULL};
@ -362,6 +363,7 @@ class ValidateQuery : public td::actor::Actor {
bool check_one_prev_dict_update(ton::BlockSeqno seqno, Ref<vm::CellSlice> old_val_extra,
Ref<vm::CellSlice> new_val_extra);
bool check_mc_state_extra();
bool postcheck_value_flow();
td::Status check_counter_update(const block::DiscountedCounter& oc, const block::DiscountedCounter& nc,
unsigned expected_incr);
bool check_one_block_creator_update(td::ConstBitPtr key, Ref<vm::CellSlice> old_val, Ref<vm::CellSlice> new_val);