1
0
Fork 0
mirror of https://github.com/ton-blockchain/ton synced 2025-03-09 15:40:10 +00:00

bugfix in validator

This commit is contained in:
ton 2020-04-03 18:47:22 +04:00
parent 54b40df4aa
commit c300b4ff30
5 changed files with 93 additions and 44 deletions

View file

@ -596,6 +596,36 @@ bool MsgProcessedUptoCollection::for_each_mcseqno(std::function<bool(ton::BlockS
return true;
}
// Renders this record as "[<shard>,<mc_seqno>,<last_inmsg_lt>,<last_inmsg_hash>]"
// into `os` and returns the stream for chaining.
std::ostream& MsgProcessedUpto::print(std::ostream& os) const {
  os << "[" << ton::shard_to_str(shard) << "," << mc_seqno << "," << last_inmsg_lt << ",";
  os << last_inmsg_hash.to_hex() << "]";
  return os;
}
// Renders the whole collection as
// "MsgProcessedUptoCollection of <owner> = {<entry>, <entry>, ...}" into `os`.
// Entries are comma-separated via the MsgProcessedUpto stream operator.
std::ostream& MsgProcessedUptoCollection::print(std::ostream& os) const {
  os << "MsgProcessedUptoCollection of " << owner.to_str() << " = {";
  bool first = true;
  for (const auto& entry : list) {
    if (!first) {
      os << ", ";
    }
    first = false;
    os << entry;
  }
  os << "}";
  return os;
}
// Convenience wrapper: formats this record via print() into a string.
std::string MsgProcessedUpto::to_str() const {
  std::ostringstream buffer;
  print(buffer);
  return buffer.str();
}
// Convenience wrapper: formats the whole collection via print() into a string.
std::string MsgProcessedUptoCollection::to_str() const {
  std::ostringstream buffer;
  print(buffer);
  return buffer.str();
}
// unpacks some fields from EnqueuedMsg
bool EnqueuedMsgDescr::unpack(vm::CellSlice& cs) {
block::gen::EnqueuedMsg::Record enq;

View file

@ -174,8 +174,14 @@ struct MsgProcessedUpto {
bool can_check_processed() const {
return (bool)compute_shard_end_lt;
}
std::ostream& print(std::ostream& os) const;
std::string to_str() const;
};
// Stream-insertion support so a MsgProcessedUpto can be written directly to an
// std::ostream (e.g. inside LOG macros); delegates to the member print().
static inline std::ostream& operator<<(std::ostream& os, const MsgProcessedUpto& proc) {
return proc.print(os);
}
struct MsgProcessedUptoCollection {
ton::ShardIdFull owner;
bool valid{false};
@ -202,8 +208,14 @@ struct MsgProcessedUptoCollection {
bool already_processed(const EnqueuedMsgDescr& msg) const;
bool can_check_processed() const;
bool for_each_mcseqno(std::function<bool(ton::BlockSeqno)>) const;
std::ostream& print(std::ostream& os) const;
std::string to_str() const;
};
// Stream-insertion support so a MsgProcessedUptoCollection can be written
// directly to an std::ostream; delegates to the member print().
static inline std::ostream& operator<<(std::ostream& os, const MsgProcessedUptoCollection& proc_coll) {
return proc_coll.print(os);
}
struct ParamLimits {
enum { limits_cnt = 4 };
enum { cl_underload = 0, cl_normal = 1, cl_soft = 2, cl_medium = 3, cl_hard = 4 };

View file

@ -2,20 +2,23 @@
"TonUtil.fif" include
"Asm.fif" include
{ ."usage: " @' $0 type ." <workchain-id> <dest-addr> [<filename-base>]" cr
{ ."usage: " $0 type ." <workchain-id> <dest-addr> [<filename-base>]" cr
."Creates a new pinger in specified workchain, with destination address <dest-addr>. " cr
."Resulting initialization query is saved into <filename-base>-query.boc ('new-pinger-query.boc' by default)" cr 1 halt
} : usage
def? $# { @' $# dup 1 < swap 3 > or ' usage if } if
$# dup 1 < swap 3 > or ' usage if
3 :$1..n
Basechain 256 1<<1- 3 15 */ 2constant dest-addr
Basechain constant wc // create a wallet in workchain 0 (basechain)
def? $1 { @' $1 parse-workchain-id =: wc } if // set workchain id from command line argument
def? $2 { @' $2 false parse-load-address drop 2=: dest-addr } if
def? $3 { @' $3 } { "new-pinger" } cond constant file-base
$1 parse-workchain-id =: wc // set workchain id from command line argument
$2 dup null? { drop } { false parse-load-address drop 2=: dest-addr } cond
$3 "new-pinger" replace-if-null constant file-base
."Creating new pinger in workchain " wc . cr
."Address to ping is " dest-addr 2dup .addr ." = " 6 .Addr cr
// Create new simple pinger
<{ SETCP0 DUP INC 1 RSHIFT# 32 THROWIF // fail unless recv_internal or recv_external

View file

@ -765,14 +765,12 @@ bool Collator::add_trivial_neighbor_after_merge() {
++found;
LOG(DEBUG) << "neighbor #" << i << " : " << nb.blk_.to_str() << " intersects our shard " << shard.to_str();
if (!ton::shard_is_parent(shard, nb.shard()) || found > 2) {
LOG(FATAL) << "impossible shard configuration in add_trivial_neighbor_after_merge()";
return false;
return fatal_error("impossible shard configuration in add_trivial_neighbor_after_merge()");
}
auto prev_shard = prev_blocks.at(found - 1).shard_full();
if (nb.shard() != prev_shard) {
LOG(FATAL) << "neighbor shard " << nb.shard().to_str() << " does not match that of our ancestor "
<< prev_shard.to_str();
return false;
return fatal_error("neighbor shard "s + nb.shard().to_str() + " does not match that of our ancestor " +
prev_shard.to_str());
}
if (found == 1) {
nb.set_queue_root(out_msg_queue_->get_root_cell());
@ -861,8 +859,7 @@ bool Collator::add_trivial_neighbor() {
<< " with shard shrinking to our (immediate after-split adjustment)";
cs = 2;
} else {
LOG(FATAL) << "impossible shard configuration in add_trivial_neighbor()";
return false;
return fatal_error("impossible shard configuration in add_trivial_neighbor()");
}
} else if (ton::shard_is_parent(nb.shard(), shard) && shard == prev_shard) {
// case 3. Continued after-split
@ -917,8 +914,7 @@ bool Collator::add_trivial_neighbor() {
nb.disable();
}
} else {
LOG(FATAL) << "impossible shard configuration in add_trivial_neighbor()";
return false;
return fatal_error("impossible shard configuration in add_trivial_neighbor()");
}
}
}

View file

@ -115,6 +115,14 @@ bool ValidateQuery::fatal_error(td::Status error) {
error.ensure_error();
LOG(ERROR) << "aborting validation of block candidate for " << shard_.to_str() << " : " << error.to_string();
if (main_promise) {
auto c = error.code();
if (c <= -667 && c >= -670) {
errorlog::ErrorLog::log(PSTRING() << "FATAL ERROR: aborting validation of block candidate for " << shard_.to_str()
<< " : " << error << ": data=" << block_candidate.id.file_hash.to_hex()
<< " collated_data=" << block_candidate.collated_file_hash.to_hex());
errorlog::ErrorLog::log_file(block_candidate.data.clone());
errorlog::ErrorLog::log_file(block_candidate.collated_data.clone());
}
main_promise(std::move(error));
}
stop();
@ -1892,14 +1900,12 @@ bool ValidateQuery::add_trivial_neighbor_after_merge() {
++found;
LOG(DEBUG) << "neighbor #" << i << " : " << nb.blk_.to_str() << " intersects our shard " << shard_.to_str();
if (!ton::shard_is_parent(shard_, nb.shard()) || found > 2) {
LOG(FATAL) << "impossible shard configuration in add_trivial_neighbor_after_merge()";
return false;
return fatal_error("impossible shard configuration in add_trivial_neighbor_after_merge()");
}
auto prev_shard = prev_blocks.at(found - 1).shard_full();
if (nb.shard() != prev_shard) {
LOG(FATAL) << "neighbor shard " << nb.shard().to_str() << " does not match that of our ancestor "
<< prev_shard.to_str();
return false;
return fatal_error("neighbor shard "s + nb.shard().to_str() + " does not match that of our ancestor " +
prev_shard.to_str());
}
if (found == 1) {
nb.set_queue_root(ps_.out_msg_queue_->get_root_cell());
@ -1989,8 +1995,7 @@ bool ValidateQuery::add_trivial_neighbor() {
<< " with shard shrinking to our (immediate after-split adjustment)";
cs = 2;
} else {
LOG(FATAL) << "impossible shard configuration in add_trivial_neighbor()";
return false;
return fatal_error("impossible shard configuration in add_trivial_neighbor()");
}
} else if (ton::shard_is_parent(nb.shard(), shard_) && shard_ == prev_shard) {
// case 3. Continued after-split
@ -2045,8 +2050,7 @@ bool ValidateQuery::add_trivial_neighbor() {
nb.disable();
}
} else {
LOG(FATAL) << "impossible shard configuration in add_trivial_neighbor()";
return false;
return fatal_error("impossible shard configuration in add_trivial_neighbor()");
}
}
}
@ -3172,7 +3176,6 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref<vm::CellSlice> in_msg)
break;
}
default:
LOG(FATAL) << "unhandled InMsg tag " << tag;
return fatal_error(PSTRING() << "unknown InMsgTag " << tag);
}
@ -3705,8 +3708,7 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref<vm::CellSlice> out_ms
break;
}
default:
LOG(FATAL) << "unknown OutMsg tag " << tag;
return false;
return fatal_error(PSTRING() << "unknown OutMsg tag " << tag);
}
return true;
@ -3819,9 +3821,9 @@ bool ValidateQuery::check_neighbor_outbound_message(Ref<vm::CellSlice> enq_msg,
bool f0 = ps_.processed_upto_->already_processed(enq);
bool f1 = ns_.processed_upto_->already_processed(enq);
if (f0 && !f1) {
LOG(FATAL) << "a previously processed message has been un-processed (impossible situation after the validation of "
"ProcessedInfo)";
return false;
return fatal_error(
"a previously processed message has been un-processed (impossible situation after the validation of "
"ProcessedInfo)");
}
if (f0) {
// this message has been processed in a previous block of this shard
@ -3847,14 +3849,19 @@ bool ValidateQuery::check_neighbor_outbound_message(Ref<vm::CellSlice> enq_msg,
key.to_hex(352) + " of old outbound queue contains a different MsgEnvelope");
}
}
// next check is incorrect after a merge, when ns_.processed_upto has > 1 entries
// we effectively comment it out
return true;
// NB. we might have a non-trivial dequeueing out_entry with this message hash, but another envelope (for transit messages)
// (so we cannot assert that out_entry is null)
if (claimed_proc_lt_ && (claimed_proc_lt_ < lt || (claimed_proc_lt_ == lt && claimed_proc_hash_ < enq.hash_))) {
LOG(FATAL) << "internal inconsistency: new ProcessedInfo claims to have processed all messages up to ("
<< claimed_proc_lt_ << "," << claimed_proc_hash_.to_hex()
<< "), but we had somehow already processed a message (" << lt << "," << enq.hash_.to_hex()
<< ") from OutMsgQueue of neighbor " << nb.blk_.to_str() << " key " << key.to_hex(352);
return false;
LOG(WARNING) << "old processed_upto: " << ps_.processed_upto_->to_str();
LOG(WARNING) << "new processed_upto: " << ns_.processed_upto_->to_str();
return fatal_error(
-669, PSTRING() << "internal inconsistency: new ProcessedInfo claims to have processed all messages up to ("
<< claimed_proc_lt_ << "," << claimed_proc_hash_.to_hex()
<< "), but we had somehow already processed a message (" << lt << "," << enq.hash_.to_hex()
<< ") from OutMsgQueue of neighbor " << nb.blk_.to_str() << " key " << key.to_hex(352));
}
return true;
}
@ -3862,11 +3869,12 @@ bool ValidateQuery::check_neighbor_outbound_message(Ref<vm::CellSlice> enq_msg,
// this message must have been imported and processed in this very block
// (because it is marked processed after this block, but not before)
if (!claimed_proc_lt_ || claimed_proc_lt_ < lt || (claimed_proc_lt_ == lt && claimed_proc_hash_ < enq.hash_)) {
LOG(FATAL) << "internal inconsistency: new ProcessedInfo claims to have processed all messages up to ("
<< claimed_proc_lt_ << "," << claimed_proc_hash_.to_hex()
<< "), but we had somehow processed in this block a message (" << lt << "," << enq.hash_.to_hex()
<< ") from OutMsgQueue of neighbor " << nb.blk_.to_str() << " key " << key.to_hex(352);
return false;
return fatal_error(
-669, PSTRING() << "internal inconsistency: new ProcessedInfo claims to have processed all messages up to ("
<< claimed_proc_lt_ << "," << claimed_proc_hash_.to_hex()
<< "), but we had somehow processed in this block a message (" << lt << ","
<< enq.hash_.to_hex() << ") from OutMsgQueue of neighbor " << nb.blk_.to_str() << " key "
<< key.to_hex(352));
}
// must have a msg_import_fin or msg_import_tr InMsg record
if (in_entry.is_null()) {
@ -3894,11 +3902,11 @@ bool ValidateQuery::check_neighbor_outbound_message(Ref<vm::CellSlice> enq_msg,
// the message is left unprocessed in our virtual "inbound queue"
// just a simple sanity check
if (claimed_proc_lt_ && !(claimed_proc_lt_ < lt || (claimed_proc_lt_ == lt && claimed_proc_hash_ < enq.hash_))) {
LOG(FATAL) << "internal inconsistency: new ProcessedInfo claims to have processed all messages up to ("
<< claimed_proc_lt_ << "," << claimed_proc_hash_.to_hex()
<< "), but we somehow have not processed a message (" << lt << "," << enq.hash_.to_hex()
<< ") from OutMsgQueue of neighbor " << nb.blk_.to_str() << " key " << key.to_hex(352);
return false;
return fatal_error(
-669, PSTRING() << "internal inconsistency: new ProcessedInfo claims to have processed all messages up to ("
<< claimed_proc_lt_ << "," << claimed_proc_hash_.to_hex()
<< "), but we somehow have not processed a message (" << lt << "," << enq.hash_.to_hex()
<< ") from OutMsgQueue of neighbor " << nb.blk_.to_str() << " key " << key.to_hex(352));
}
return true;
}