mirror of https://github.com/ton-blockchain/ton
synced 2025-03-09 15:40:10 +00:00

Improve large OutMsgQueue clearance (#822)

* Improve Collator::opt_msg_queue_cleanup, increase collator timeout
* Disable importing ext msgs if queue is too big
* Extend timeout in collator if previous block is too old

Co-authored-by: SpyCheese <mikle98@yandex.ru>

parent 51baec48a0
commit 7fcf267717

6 changed files with 105 additions and 25 deletions
--- a/crypto/vm/dict.cpp
+++ b/crypto/vm/dict.cpp
@@ -21,6 +21,7 @@
 #include "vm/cellslice.h"
 #include "vm/stack.hpp"
 #include "common/bitstring.h"
+#include "td/utils/Random.h"

 #include "td/utils/bits.h"

@@ -2007,7 +2008,7 @@ bool DictionaryFixed::combine_with(DictionaryFixed& dict2) {

 bool DictionaryFixed::dict_check_for_each(Ref<Cell> dict, td::BitPtr key_buffer, int n, int total_key_len,
                                           const DictionaryFixed::foreach_func_t& foreach_func,
-                                          bool invert_first) const {
+                                          bool invert_first, bool shuffle) const {
   if (dict.is_null()) {
     return true;
   }
@@ -2026,26 +2027,29 @@ bool DictionaryFixed::dict_check_for_each(Ref<Cell> dict, td::BitPtr key_buffer,
     key_buffer += l + 1;
     if (l) {
       invert_first = false;
-    } else if (invert_first) {
+    }
+    bool invert = shuffle ? td::Random::fast(0, 1) == 1 : invert_first;
+    if (invert) {
       std::swap(c1, c2);
     }
-    key_buffer[-1] = invert_first;
+    key_buffer[-1] = invert;
     // recursive check_foreach applied to both children
-    if (!dict_check_for_each(std::move(c1), key_buffer, n - l - 1, total_key_len, foreach_func)) {
+    if (!dict_check_for_each(std::move(c1), key_buffer, n - l - 1, total_key_len, foreach_func, false, shuffle)) {
       return false;
     }
-    key_buffer[-1] = !invert_first;
-    return dict_check_for_each(std::move(c2), key_buffer, n - l - 1, total_key_len, foreach_func);
+    key_buffer[-1] = !invert;
+    return dict_check_for_each(std::move(c2), key_buffer, n - l - 1, total_key_len, foreach_func, false, shuffle);
   }
 }

-bool DictionaryFixed::check_for_each(const foreach_func_t& foreach_func, bool invert_first) {
+bool DictionaryFixed::check_for_each(const foreach_func_t& foreach_func, bool invert_first, bool shuffle) {
   force_validate();
   if (is_empty()) {
     return true;
   }
   int key_len = get_key_bits();
   unsigned char key_buffer[max_key_bytes];
-  return dict_check_for_each(get_root_cell(), td::BitPtr{key_buffer}, key_len, key_len, foreach_func, invert_first);
+  return dict_check_for_each(get_root_cell(), td::BitPtr{key_buffer}, key_len, key_len, foreach_func, invert_first,
+                             shuffle);
 }

 static inline bool set_bit(td::BitPtr ptr, bool value = true) {
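The change above threads a new `shuffle` flag through `dict_check_for_each`: at every fork of the dictionary's Patricia trie, `td::Random::fast(0, 1)` decides which child is visited first, so different collators walking the same large queue start from different regions instead of all hammering the lexicographically first keys. A minimal self-contained sketch of that traversal idea, with a plain binary tree standing in for TON's cell trie (every name below is illustrative, not TON API):

#include <cstdio>
#include <functional>
#include <memory>
#include <random>
#include <utility>

// Toy binary tree standing in for the cell-based Patricia trie.
struct Node {
  int value = 0;
  std::unique_ptr<Node> left, right;
};

// Same idea as the patched dict_check_for_each: with `shuffle` set, each
// fork picks at random which child to descend first, so repeated walks of
// one tree visit its leaves in different orders; a false return aborts.
bool check_for_each(const Node* n, const std::function<bool(int)>& visit, bool shuffle, std::mt19937& rng) {
  if (!n) {
    return true;
  }
  if (!n->left && !n->right) {
    return visit(n->value);
  }
  const Node* first = n->left.get();
  const Node* second = n->right.get();
  // analogue of: bool invert = shuffle ? td::Random::fast(0, 1) == 1 : invert_first;
  if (shuffle && std::uniform_int_distribution<int>(0, 1)(rng) == 1) {
    std::swap(first, second);
  }
  return check_for_each(first, visit, shuffle, rng) && check_for_each(second, visit, shuffle, rng);
}

int main() {
  Node root;
  root.left = std::make_unique<Node>();
  root.left->value = 1;
  root.right = std::make_unique<Node>();
  root.right->value = 2;
  std::mt19937 rng{std::random_device{}()};
  // prints "1 2" or "2 1", each with probability 1/2
  check_for_each(&root, [](int v) { std::printf("%d ", v); return true; }, true, rng);
  std::printf("\n");
  return 0;
}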
--- a/crypto/vm/dict.h
+++ b/crypto/vm/dict.h
@@ -223,7 +223,7 @@ class DictionaryFixed : public DictionaryBase {
   int get_common_prefix(td::BitPtr buffer, unsigned buffer_len);
   bool cut_prefix_subdict(td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false);
   Ref<vm::Cell> extract_prefix_subdict_root(td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false);
-  bool check_for_each(const foreach_func_t& foreach_func, bool invert_first = false);
+  bool check_for_each(const foreach_func_t& foreach_func, bool invert_first = false, bool shuffle = false);
   int filter(filter_func_t check);
   bool combine_with(DictionaryFixed& dict2, const combine_func_t& combine_func, int mode = 0);
   bool combine_with(DictionaryFixed& dict2, const simple_combine_func_t& simple_combine_func, int mode = 0);
@@ -292,7 +292,7 @@ class DictionaryFixed : public DictionaryBase {
   std::pair<Ref<Cell>, bool> extract_prefix_subdict_internal(Ref<Cell> dict, td::ConstBitPtr prefix, int prefix_len,
                                                              bool remove_prefix = false) const;
   bool dict_check_for_each(Ref<Cell> dict, td::BitPtr key_buffer, int n, int total_key_len,
-                           const foreach_func_t& foreach_func, bool invert_first = false) const;
+                           const foreach_func_t& foreach_func, bool invert_first = false, bool shuffle = false) const;
   std::pair<Ref<Cell>, int> dict_filter(Ref<Cell> dict, td::BitPtr key, int n, const filter_func_t& check_leaf,
                                         int& skip_rest) const;
   Ref<Cell> dict_combine_with(Ref<Cell> dict1, Ref<Cell> dict2, td::BitPtr key_buffer, int n, int total_key_len,
--- a/tdutils/td/utils/Time.h
+++ b/tdutils/td/utils/Time.h
@@ -110,6 +110,7 @@ class Timestamp {
   }

   friend bool operator==(Timestamp a, Timestamp b);
+  friend Timestamp &operator+=(Timestamp &a, double b);

  private:
   double at_{0};
@@ -122,6 +123,11 @@ inline bool operator<(const Timestamp &a, const Timestamp &b) {
   return a.at() < b.at();
 }

+inline Timestamp &operator+=(Timestamp &a, double b) {
+  a.at_ += b;
+  return a;
+}
+
 template <class StorerT>
 void store(const Timestamp &timestamp, StorerT &storer) {
   storer.store_binary(timestamp.at() - Time::now() + Clocks::system());
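The new `operator+=` mutates `at_` in place, sliding a deadline later by a number of seconds. A small usage sketch, assuming the patched `Time.h` above (the helper function is hypothetical):

#include "td/utils/Time.h"  // patched header with operator+=

// Hypothetical helper: slide several deadlines later by the same delta,
// which is what Collator::init_utime does with its timeouts further below.
void extend_deadlines(td::Timestamp& timeout, td::Timestamp& soft, double add) {
  timeout += add;  // deadline moves `add` seconds later
  soft += add;
}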
--- a/validator/impl/collator.cpp
+++ b/validator/impl/collator.cpp
@@ -703,6 +703,11 @@ bool Collator::unpack_last_mc_state() {
     return fatal_error(limits.move_as_error());
   }
   block_limits_ = limits.move_as_ok();
+  if (!is_masterchain()) {
+    // block_limits_->bytes = {131072 / 3, 524288 / 3, 1048576 / 3};
+    // block_limits_->gas = {2000000 / 3, 10000000 / 3, 20000000 / 3};
+    block_limits_->lt_delta = {20, 180, 200};
+  }
   LOG(DEBUG) << "block limits: bytes [" << block_limits_->bytes.underload() << ", " << block_limits_->bytes.soft()
              << ", " << block_limits_->bytes.hard() << "]";
   LOG(DEBUG) << "block limits: gas [" << block_limits_->gas.underload() << ", " << block_limits_->gas.soft() << ", "
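The `{20, 180, 200}` triple appears to follow the same underload/soft/hard convention the DEBUG lines above print for the bytes and gas limits; read that way, tightening `lt_delta` for non-masterchain blocks caps the logical-time span a single block may cover.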
@@ -1818,6 +1823,17 @@ bool Collator::init_utime() {
   CHECK(config_);
   // consider unixtime and lt from previous block(s) of the same shardchain
   prev_now_ = prev_state_utime_;
+  // Extend collator timeout if previous block is too old
+  td::Timestamp new_timeout = td::Timestamp::in(std::min(30.0, (td::Clocks::system() - (double)prev_now_) / 2));
+  if (timeout < new_timeout) {
+    double add = new_timeout.at() - timeout.at();
+    timeout = new_timeout;
+    queue_cleanup_timeout_ += add;
+    soft_timeout_ += add;
+    medium_timeout_ += add;
+    alarm_timestamp() = timeout;
+  }
+
   auto prev = std::max<td::uint32>(config_->utime, prev_now_);
   now_ = std::max<td::uint32>(prev + 1, (unsigned)std::time(nullptr));
   if (now_ > now_upper_limit_) {
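A worked example of the extension rule (numbers illustrative): if the previous block is 40 s old, `new_timeout` lands min(30.0, 40 / 2) = 20 s in the future. A collator whose remaining `timeout` was 8 s then gains `add` = 20 − 8 = 12 s, and `queue_cleanup_timeout_`, `soft_timeout_`, and `medium_timeout_` all slide later by those same 12 s, so the relative spacing of the deadlines is preserved.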
@@ -2170,18 +2186,30 @@ bool Collator::out_msg_queue_cleanup() {
     }
   }

-  auto res = out_msg_queue_->filter([&](vm::CellSlice& cs, td::ConstBitPtr key, int n) -> int {
+  auto queue_root = out_msg_queue_->get_root_cell();
+  if (queue_root.is_null()) {
+    LOG(DEBUG) << "out_msg_queue is empty";
+    return true;
+  }
+  auto old_out_msg_queue = std::make_unique<vm::AugmentedDictionary>(queue_root, 352, block::tlb::aug_OutMsgQueue);
+
+  int deleted = 0;
+  int total = 0;
+  bool fail = false;
+  old_out_msg_queue->check_for_each([&](Ref<vm::CellSlice> value, td::ConstBitPtr key, int n) -> bool {
+    ++total;
     assert(n == 352);
+    vm::CellSlice& cs = value.write();
     // LOG(DEBUG) << "key is " << key.to_hex(n);
     if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) {
       LOG(WARNING) << "cleaning up outbound queue takes too long, ending";
       outq_cleanup_partial_ = true;
-      return (1 << 30) + 1;  // retain all remaining outbound queue entries including this one without processing
+      return false;  // retain all remaining outbound queue entries including this one without processing
     }
     if (block_full_) {
       LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially";
       outq_cleanup_partial_ = true;
-      return (1 << 30) + 1;  // retain all remaining outbound queue entries including this one without processing
+      return false;  // retain all remaining outbound queue entries including this one without processing
     }
     block::EnqueuedMsgDescr enq_msg_descr;
     unsigned long long created_lt;
@@ -2190,7 +2218,8 @@ bool Collator::out_msg_queue_cleanup() {
           && enq_msg_descr.check_key(key)  // check key
           && enq_msg_descr.lt_ == created_lt)) {
       LOG(ERROR) << "cannot unpack EnqueuedMsg with key " << key.to_hex(n);
-      return -1;
+      fail = true;
+      return false;
     }
     LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << ","
                << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_;
@@ -2208,22 +2237,29 @@ bool Collator::out_msg_queue_cleanup() {
     if (delivered) {
       LOG(DEBUG) << "outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex()
                  << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ << " has been already delivered, dequeueing";
+      ++deleted;
+      out_msg_queue_->lookup_delete_with_extra(key, n);
       if (!dequeue_message(std::move(enq_msg_descr.msg_env_), deliver_lt)) {
         fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << ","
                               << enq_msg_descr.hash_.to_hex() << ") by inserting a msg_export_deq record");
-        return -1;
+        fail = true;
+        return false;
       }
       register_out_msg_queue_op();
       if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) {
         block_full_ = true;
       }
     }
-    return !delivered;
-  });
-  LOG(DEBUG) << "deleted " << res << " messages from out_msg_queue";
-  if (res < 0) {
+    return true;
+  }, false, true /* random order */);
+  LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue, processed " << total << " messages in total";
+  if (fail) {
     return fatal_error("error scanning/updating OutMsgQueue");
   }
+  if (outq_cleanup_partial_ || total > 8000) {
+    LOG(INFO) << "out_msg_queue too big, skipping importing external messages";
+    skip_extmsg_ = true;
+  }
   auto rt = out_msg_queue_->get_root();
   if (verbosity >= 2) {
     std::cerr << "new out_msg_queue is ";
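The rewrite above drops `filter`, with its overloaded integer return codes, in favour of a `check_for_each` pass over a snapshot (`old_out_msg_queue`) of the queue while delivered entries are deleted from the live `out_msg_queue_`; it counts `total` and `deleted` and bails out on timeout or a full block. A condensed stand-alone sketch of that snapshot-and-delete pattern with a scan budget, using `std::map` in place of `vm::AugmentedDictionary` (all names illustrative):

#include <cstdio>
#include <map>
#include <string>

// Snapshot-and-delete with a scan budget: iterate a copy of the container so
// erasing from the live one can never invalidate the walk, count work done,
// and stop early once the budget (standing in for queue_cleanup_timeout_ /
// block_full_) is exhausted.
int cleanup_queue(std::map<std::string, int>& live, int budget, bool (*delivered)(int)) {
  const std::map<std::string, int> snapshot = live;  // old_out_msg_queue analogue
  int deleted = 0, total = 0;
  for (const auto& [key, lt] : snapshot) {
    if (++total > budget) {
      std::puts("cleanup budget exhausted, finishing partially");
      break;  // remaining entries stay queued for a later block
    }
    if (delivered(lt)) {
      live.erase(key);  // lookup_delete_with_extra analogue
      ++deleted;
    }
  }
  std::printf("deleted %d messages, processed %d in total\n", deleted, total);
  return deleted;
}

int main() {
  std::map<std::string, int> queue{{"a", 1}, {"b", 2}, {"c", 3}};
  cleanup_queue(queue, 2, [](int lt) { return lt % 2 == 1; });
  return 0;
}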
@@ -3277,6 +3313,10 @@ bool Collator::process_inbound_external_messages() {
     LOG(INFO) << "skipping processing of inbound external messages";
     return true;
   }
+  if (out_msg_queue_->get_root_cell().not_null() && out_msg_queue_->get_root_cell()->get_depth() > 12) {
+    LOG(INFO) << "skipping processing of inbound external messages: out msg queue is too big";
+    return true;
+  }
   bool full = !block_limit_status_->fits(block::ParamLimits::cl_soft);
   for (auto& ext_msg_pair : ext_msg_list_) {
     if (full) {
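The depth test is a cheap size proxy rather than an exact count: the queue is a binary Patricia trie over 352-bit keys, and a trie holding more than 2^12 = 4096 entries necessarily has a cell tree deeper than 12 levels, so `get_depth() > 12` flags a queue in the thousands of messages without walking it. Message cells hanging below the leaves also add depth, which can only make the check trip earlier.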
--- a/validator/validator-group.cpp
+++ b/validator/validator-group.cpp
@@ -78,6 +78,17 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidate
     promise.set_error(td::Status::Error(ErrorCode::notready, "too old"));
     return;
   }

+  auto next_block_id = create_next_block_id(block.id.root_hash, block.id.file_hash);
+  block.id = next_block_id;
+
+  CacheKey cache_key = block_to_cache_key(block);
+  auto it = approved_candidates_cache_.find(cache_key);
+  if (it != approved_candidates_cache_.end()) {
+    promise.set_result(it->second);
+    return;
+  }
+
   auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), round_id, block = block.clone(),
                                        promise = std::move(promise)](td::Result<ValidateCandidateResult> R) mutable {
     if (R.is_error()) {
@@ -93,24 +104,32 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidate
                                          td::Timestamp::in(0.1));
     } else {
       auto v = R.move_as_ok();
-      v.visit(td::overloaded([&](UnixTime ts) { promise.set_result(ts); },
-                             [&](CandidateReject reject) {
-                               promise.set_error(td::Status::Error(ErrorCode::protoviolation,
-                                                                   PSTRING() << "bad candidate: " << reject.reason));
-                             }));
+      v.visit(td::overloaded(
+          [&](UnixTime ts) {
+            td::actor::send_closure(SelfId, &ValidatorGroup::update_approve_cache, block_to_cache_key(block),
+                                    ts);
+            promise.set_result(ts);
+          },
+          [&](CandidateReject reject) {
+            promise.set_error(
+                td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad candidate: " << reject.reason));
+          }));
     }
   });
   if (!started_) {
     P.set_error(td::Status::Error(ErrorCode::notready, "validator group not started"));
     return;
   }
-  auto next_block_id = create_next_block_id(block.id.root_hash, block.id.file_hash);
   VLOG(VALIDATOR_DEBUG) << "validating block candidate " << next_block_id;
   block.id = next_block_id;
   run_validate_query(shard_, min_ts_, min_masterchain_block_id_, prev_block_ids_, std::move(block), validator_set_,
                      manager_, td::Timestamp::in(15.0), std::move(P));
 }

+void ValidatorGroup::update_approve_cache(CacheKey key, UnixTime value) {
+  approved_candidates_cache_[key] = value;
+}
+
 void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash src, td::BufferSlice block_data,
                                             RootHash root_hash, FileHash file_hash,
                                             std::vector<BlockSignature> signatures,
@@ -155,6 +174,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash src,
                     std::move(approve_sig_set), src == local_id_, manager_, std::move(P));
   prev_block_ids_ = std::vector<BlockIdExt>{next_block_id};
   cached_collated_block_ = nullptr;
+  approved_candidates_cache_.clear();
 }

 void ValidatorGroup::retry_accept_block_query(BlockIdExt block_id, td::Ref<BlockData> block,
@@ -310,6 +330,7 @@ void ValidatorGroup::start(std::vector<BlockIdExt> prev, BlockIdExt min_masterchain_block_id,
   min_masterchain_block_id_ = min_masterchain_block_id;
   min_ts_ = min_ts;
   cached_collated_block_ = nullptr;
+  approved_candidates_cache_.clear();
   started_ = true;

   if (init_) {
--- a/validator/validator-group.hpp
+++ b/validator/validator-group.hpp
@@ -126,6 +126,15 @@ class ValidatorGroup : public td::actor::Actor {
   std::shared_ptr<CachedCollatedBlock> cached_collated_block_;

   void generated_block_candidate(std::shared_ptr<CachedCollatedBlock> cache, td::Result<BlockCandidate> R);

+  typedef std::tuple<td::Bits256, BlockIdExt, FileHash, FileHash> CacheKey;
+  std::map<CacheKey, UnixTime> approved_candidates_cache_;
+
+  void update_approve_cache(CacheKey key, UnixTime value);
+
+  static CacheKey block_to_cache_key(const BlockCandidate& block) {
+    return std::make_tuple(block.pubkey.as_bits256(), block.id, sha256_bits256(block.data), block.collated_file_hash);
+  }
 };

 } // namespace validator
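The new `approved_candidates_cache_` memoizes successful validations, keyed by everything that identifies a candidate (collator pubkey, block id, block-data hash, collated-data hash), so re-validating the same candidate in a later round is answered straight from the map; the cache is cleared whenever the group accepts a block or restarts, since the key does not capture `prev_block_ids_`. A simplified non-actor sketch of that lifecycle (stand-in types, illustrative names):

#include <cstdint>
#include <map>
#include <optional>
#include <tuple>

// Simplified stand-ins for the real types (illustrative only; the real
// CacheKey is the 4-tuple of pubkey, block id, data hash, collated hash).
using CacheKey = std::tuple<uint64_t, uint64_t>;
using UnixTime = uint32_t;

struct ApproveCache {
  std::map<CacheKey, UnixTime> approved_candidates_cache_;

  // validate_block_candidate: a hit answers immediately, no re-validation.
  std::optional<UnixTime> lookup(const CacheKey& k) const {
    auto it = approved_candidates_cache_.find(k);
    return it != approved_candidates_cache_.end() ? std::optional<UnixTime>(it->second) : std::nullopt;
  }
  // update_approve_cache: recorded once a candidate validates successfully.
  void update(CacheKey k, UnixTime ts) {
    approved_candidates_cache_[k] = ts;
  }
  // accept_block_candidate / start: the key does not embed the previous-block
  // context, so the cache is dropped whenever the group advances.
  void advance() {
    approved_candidates_cache_.clear();
  }
};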
|
|
Loading…
Add table
Add a link
Reference in a new issue