Mirror of https://github.com/ton-blockchain/ton (synced 2025-03-09 15:40:10 +00:00)
validator bugfix + old key blocks download improvement
This commit is contained in:
  parent be9c34c62d
  commit 16a4566091

13 changed files with 315 additions and 121 deletions
@@ -151,8 +151,6 @@ struct ActionPhaseConfig {
 struct CreditPhase {
   td::RefInt256 due_fees_collected;
   block::CurrencyCollection credit;
-  // td::RefInt256 credit;
-  // Ref<vm::Cell> credit_extra;
 };
 
 struct ComputePhase {
@@ -233,8 +231,6 @@ struct Account {
   ton::UnixTime last_paid;
   vm::CellStorageStat storage_stat;
   block::CurrencyCollection balance;
-  // td::RefInt256 balance;
-  // Ref<vm::Cell> extra_balance;
   td::RefInt256 due_payment;
   Ref<vm::Cell> orig_total_state;  // ^Account
   Ref<vm::Cell> total_state;       // ^Account
@@ -325,8 +321,6 @@ struct Transaction {
   Ref<vm::Cell> root;
   Ref<vm::Cell> new_total_state;
   Ref<vm::CellSlice> new_inner_state;
-  // Ref<vm::Cell> extra_balance;
-  // Ref<vm::Cell> msg_extra;
   Ref<vm::Cell> new_code, new_data, new_library;
   Ref<vm::Cell> in_msg, in_msg_state;
   Ref<vm::CellSlice> in_msg_body;
@@ -401,7 +401,9 @@ tonNode.getNextBlockDescription prev_block:tonNode.blockIdExt = tonNode.BlockDes
 tonNode.getNextBlocksDescription prev_block:tonNode.blockIdExt limit:int = tonNode.BlocksDescription;
 tonNode.getPrevBlocksDescription next_block:tonNode.blockIdExt limit:int cutoff_seqno:int = tonNode.BlocksDescription;
 tonNode.prepareBlockProof block:tonNode.blockIdExt allow_partial:Bool = tonNode.PreparedProof;
+tonNode.prepareKeyBlockProof block:tonNode.blockIdExt allow_partial:Bool = tonNode.PreparedProof;
 tonNode.prepareBlockProofs blocks:(vector tonNode.blockIdExt) allow_partial:Bool = tonNode.PreparedProof;
+tonNode.prepareKeyBlockProofs blocks:(vector tonNode.blockIdExt) allow_partial:Bool = tonNode.PreparedProof;
 tonNode.prepareBlock block:tonNode.blockIdExt = tonNode.Prepared;
 tonNode.prepareBlocks blocks:(vector tonNode.blockIdExt) = tonNode.Prepared;
 tonNode.preparePersistentState block:tonNode.blockIdExt masterchain_block:tonNode.blockIdExt = tonNode.PreparedState;
@@ -415,9 +417,13 @@ tonNode.downloadPersistentState block:tonNode.blockIdExt masterchain_block:tonNo
 tonNode.downloadPersistentStateSlice block:tonNode.blockIdExt masterchain_block:tonNode.blockIdExt offset:long max_size:long = tonNode.Data;
 tonNode.downloadZeroState block:tonNode.blockIdExt = tonNode.Data;
 tonNode.downloadBlockProof block:tonNode.blockIdExt = tonNode.Data;
+tonNode.downloadKeyBlockProof block:tonNode.blockIdExt = tonNode.Data;
 tonNode.downloadBlockProofs blocks:(vector tonNode.blockIdExt) = tonNode.DataList;
+tonNode.downloadKeyBlockProofs blocks:(vector tonNode.blockIdExt) = tonNode.DataList;
 tonNode.downloadBlockProofLink block:tonNode.blockIdExt = tonNode.Data;
+tonNode.downloadKeyBlockProofLink block:tonNode.blockIdExt = tonNode.Data;
 tonNode.downloadBlockProofLinks blocks:(vector tonNode.blockIdExt) = tonNode.DataList;
+tonNode.downloadKeyBlockProofLinks blocks:(vector tonNode.blockIdExt) = tonNode.DataList;
 tonNode.getArchiveInfo masterchain_seqno:int = tonNode.ArchiveInfo;
 tonNode.getArchiveSlice archive_id:long offset:long max_size:int = tonNode.Data;
 
Binary file not shown.
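For context only, not part of the diff: a minimal client-side sketch of serializing one of the new key-block queries with the same TL helpers the handlers added below use. Only create_serialize_tl_object and the TL constructor names appear in this commit; the header paths and create_tl_block_id are assumptions made for illustration.

#include "auto/tl/ton_api.hpp"  // assumed location of the generated TL classes
#include "ton/ton-tl.hpp"       // assumed home of create_tl_block_id

// Hedged sketch: build the body of a tonNode.downloadKeyBlockProof query for a
// key block whose BlockIdExt is already known.
td::BufferSlice make_download_key_block_proof_query(const ton::BlockIdExt &block_id) {
  return ton::create_serialize_tl_object<ton::ton_api::tonNode_downloadKeyBlockProof>(
      ton::create_tl_block_id(block_id));
}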
@@ -144,6 +144,35 @@ void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNo
                           create_block_id(query.block_), false, std::move(P));
 }
 
+void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareKeyBlockProof &query,
+                                       td::Promise<td::BufferSlice> promise) {
+  if (query.block_->seqno_ == 0) {
+    promise.set_error(td::Status::Error(ErrorCode::protoviolation, "cannot download proof for zero state"));
+    return;
+  }
+  auto P = td::PromiseCreator::lambda(
+      [allow_partial = query.allow_partial_, promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
+        if (R.is_error()) {
+          auto x = create_serialize_tl_object<ton_api::tonNode_preparedProofEmpty>();
+          promise.set_value(std::move(x));
+        } else if (allow_partial) {
+          auto x = create_serialize_tl_object<ton_api::tonNode_preparedProofLink>();
+          promise.set_value(std::move(x));
+        } else {
+          auto x = create_serialize_tl_object<ton_api::tonNode_preparedProof>();
+          promise.set_value(std::move(x));
+        }
+      });
+
+  if (query.allow_partial_) {
+    td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof_link,
+                            create_block_id(query.block_), std::move(P));
+  } else {
+    td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof,
+                            create_block_id(query.block_), std::move(P));
+  }
+}
+
 void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlockProof &query,
                                        td::Promise<td::BufferSlice> promise) {
   auto P = td::PromiseCreator::lambda(
@@ -190,6 +219,42 @@ void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNo
                           create_block_id(query.block_), false, std::move(P));
 }
 
+void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProof &query,
+                                       td::Promise<td::BufferSlice> promise) {
+  if (query.block_->seqno_ == 0) {
+    promise.set_error(td::Status::Error(ErrorCode::protoviolation, "cannot download proof for zero state"));
+    return;
+  }
+  auto P = td::PromiseCreator::lambda([promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
+    if (R.is_error()) {
+      promise.set_error(td::Status::Error(ErrorCode::protoviolation, "unknown block proof"));
+    } else {
+      promise.set_value(R.move_as_ok());
+    }
+  });
+
+  td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof,
+                          create_block_id(query.block_), std::move(P));
+}
+
+void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProofLink &query,
+                                       td::Promise<td::BufferSlice> promise) {
+  if (query.block_->seqno_ == 0) {
+    promise.set_error(td::Status::Error(ErrorCode::protoviolation, "cannot download proof for zero state"));
+    return;
+  }
+  auto P = td::PromiseCreator::lambda([promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
+    if (R.is_error()) {
+      promise.set_error(td::Status::Error(ErrorCode::protoviolation, "unknown block proof"));
+    } else {
+      promise.set_value(R.move_as_ok());
+    }
+  });
+
+  td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof_link,
+                          create_block_id(query.block_), std::move(P));
+}
+
 void FullNodeMasterImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareZeroState &query,
                                        td::Promise<td::BufferSlice> promise) {
   auto P =
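A hedged sketch, not part of the diff, of the two-step exchange these handlers serve: a peer first sends tonNode.prepareKeyBlockProof and then, depending on which prepared* answer comes back, follows up with tonNode.downloadKeyBlockProof or tonNode.downloadKeyBlockProofLink. Only the TL constructor names come from this commit; fetch_tl_object, create_tl_block_id and the helper shape below are assumptions for illustration.

// Hedged client-side sketch: choose the follow-up download query after a
// tonNode.prepareKeyBlockProof answer. Returns an error if the peer prepared nothing.
td::Result<td::BufferSlice> make_key_block_follow_up(const ton::BlockIdExt &block_id,
                                                     td::BufferSlice prepare_answer) {
  TRY_RESULT(ans, ton::fetch_tl_object<ton::ton_api::tonNode_PreparedProof>(std::move(prepare_answer), true));
  if (ans->get_id() == ton::ton_api::tonNode_preparedProof::ID) {
    return ton::create_serialize_tl_object<ton::ton_api::tonNode_downloadKeyBlockProof>(
        ton::create_tl_block_id(block_id));
  }
  if (ans->get_id() == ton::ton_api::tonNode_preparedProofLink::ID) {
    return ton::create_serialize_tl_object<ton::ton_api::tonNode_downloadKeyBlockProofLink>(
        ton::create_tl_block_id(block_id));
  }
  return td::Status::Error("peer has no proof for this key block");  // tonNode.preparedProofEmpty case
}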
@@ -44,10 +44,16 @@ class FullNodeMasterImpl : public FullNodeMaster {
                      td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareBlockProof &query,
                      td::Promise<td::BufferSlice> promise);
+  void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareKeyBlockProof &query,
+                     td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlockProof &query,
                      td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlockProofLink &query,
                      td::Promise<td::BufferSlice> promise);
+  void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProof &query,
+                     td::Promise<td::BufferSlice> promise);
+  void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProofLink &query,
+                     td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareBlock &query,
                      td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlock &query,
@@ -287,6 +287,35 @@ void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNod
                           create_block_id(query.block_), false, std::move(P));
 }
 
+void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareKeyBlockProof &query,
+                                      td::Promise<td::BufferSlice> promise) {
+  if (query.block_->seqno_ == 0) {
+    promise.set_error(td::Status::Error(ErrorCode::protoviolation, "cannot download proof for zero state"));
+    return;
+  }
+  auto P = td::PromiseCreator::lambda(
+      [allow_partial = query.allow_partial_, promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
+        if (R.is_error()) {
+          auto x = create_serialize_tl_object<ton_api::tonNode_preparedProofEmpty>();
+          promise.set_value(std::move(x));
+        } else if (allow_partial) {
+          auto x = create_serialize_tl_object<ton_api::tonNode_preparedProofLink>();
+          promise.set_value(std::move(x));
+        } else {
+          auto x = create_serialize_tl_object<ton_api::tonNode_preparedProof>();
+          promise.set_value(std::move(x));
+        }
+      });
+
+  if (query.allow_partial_) {
+    td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof_link,
+                            create_block_id(query.block_), std::move(P));
+  } else {
+    td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof,
+                            create_block_id(query.block_), std::move(P));
+  }
+}
+
 void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlockProof &query,
                                       td::Promise<td::BufferSlice> promise) {
   auto P = td::PromiseCreator::lambda(
@@ -333,6 +362,42 @@ void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNod
                           create_block_id(query.block_), false, std::move(P));
 }
 
+void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProof &query,
+                                      td::Promise<td::BufferSlice> promise) {
+  if (query.block_->seqno_ == 0) {
+    promise.set_error(td::Status::Error(ErrorCode::protoviolation, "cannot download proof for zero state"));
+    return;
+  }
+  auto P = td::PromiseCreator::lambda([promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
+    if (R.is_error()) {
+      promise.set_error(td::Status::Error(ErrorCode::protoviolation, "unknown block proof"));
+    } else {
+      promise.set_value(R.move_as_ok());
+    }
+  });
+
+  td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof,
+                          create_block_id(query.block_), std::move(P));
+}
+
+void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProofLink &query,
+                                      td::Promise<td::BufferSlice> promise) {
+  if (query.block_->seqno_ == 0) {
+    promise.set_error(td::Status::Error(ErrorCode::protoviolation, "cannot download proof for zero state"));
+    return;
+  }
+  auto P = td::PromiseCreator::lambda([promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
+    if (R.is_error()) {
+      promise.set_error(td::Status::Error(ErrorCode::protoviolation, "unknown block proof"));
+    } else {
+      promise.set_value(R.move_as_ok());
+    }
+  });
+
+  td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::get_key_block_proof_link,
+                          create_block_id(query.block_), std::move(P));
+}
+
 void FullNodeShardImpl::process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareZeroState &query,
                                       td::Promise<td::BufferSlice> promise) {
   auto P =
@@ -639,7 +704,7 @@ void FullNodeShardImpl::download_persistent_state(BlockIdExt id, BlockIdExt mast
 void FullNodeShardImpl::download_block_proof(BlockIdExt block_id, td::uint32 priority, td::Timestamp timeout,
                                              td::Promise<td::BufferSlice> promise) {
   auto &b = choose_neighbour();
-  td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, false, adnl_id_, overlay_id_, b.adnl_id,
+  td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, false, false, adnl_id_, overlay_id_, b.adnl_id,
                                          priority, timeout, validator_manager_, rldp_, overlays_, adnl_, client_,
                                          create_neighbour_promise(b, std::move(promise)))
       .release();
@@ -648,7 +713,7 @@ void FullNodeShardImpl::download_block_proof(BlockIdExt block_id, td::uint32 pri
 void FullNodeShardImpl::download_block_proof_link(BlockIdExt block_id, td::uint32 priority, td::Timestamp timeout,
                                                   td::Promise<td::BufferSlice> promise) {
   auto &b = choose_neighbour();
-  td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, true, adnl_id_, overlay_id_,
+  td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, true, false, adnl_id_, overlay_id_,
                                          adnl::AdnlNodeIdShort::zero(), priority, timeout, validator_manager_, rldp_,
                                          overlays_, adnl_, client_, create_neighbour_promise(b, std::move(promise)))
       .release();
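Both call sites above gain one extra boolean right after the existing proof-link flag. Given the commit subject, it presumably tells the DownloadProof actor whether a key-block proof is being fetched; the sketch below is a hedged reading of the diff with both flags named, and both names are assumptions (only the argument order is visible above).

// Hedged reading of the new download_block_proof() call site, with the booleans named.
const bool allow_partial_proof = false;  // assumed name: request a full proof, not just a proof link
const bool is_key_block = false;         // assumed name and meaning of the argument added by this commit
td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, allow_partial_proof, is_key_block,
                                       adnl_id_, overlay_id_, b.adnl_id, priority, timeout, validator_manager_,
                                       rldp_, overlays_, adnl_, client_,
                                       create_neighbour_promise(b, std::move(promise)))
    .release();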
@@ -64,7 +64,7 @@ class FullNodeShardImpl : public FullNodeShard {
     return 2;
   }
   static constexpr td::uint64 proto_capabilities() {
-    return 0;
+    return 1;
   }
   static constexpr td::uint32 max_neighbours() {
     return 16;
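The advertised proto_capabilities() value moves from 0 to 1, presumably so peers can tell whether a neighbour understands the new key-block queries before sending them. A hedged sketch of how such a capability bit could be tested; the bit assignment and the constant name are assumptions, the diff only shows the advertised value changing.

// Assumed capability bit; the diff only shows proto_capabilities() going from 0 to 1.
constexpr td::uint64 capability_key_block_proofs = 1;

// Hedged helper: would a neighbour advertising 'caps' accept the new key-block queries?
bool supports_key_block_proofs(td::uint64 caps) {
  return (caps & capability_key_block_proofs) != 0;
}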
@@ -94,10 +94,16 @@ class FullNodeShardImpl : public FullNodeShard {
                      td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareBlockProof &query,
                      td::Promise<td::BufferSlice> promise);
+  void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareKeyBlockProof &query,
+                     td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlockProof &query,
                      td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlockProofLink &query,
                      td::Promise<td::BufferSlice> promise);
+  void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProof &query,
+                     td::Promise<td::BufferSlice> promise);
+  void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadKeyBlockProofLink &query,
+                     td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_prepareBlock &query,
                      td::Promise<td::BufferSlice> promise);
   void process_query(adnl::AdnlNodeIdShort src, ton_api::tonNode_downloadBlock &query,
@@ -47,9 +47,9 @@ class Collator final : public td::actor::Actor {
   }
   using LtCellRef = block::LtCellRef;
   using NewOutMsg = block::NewOutMsg;
-  const ShardIdFull shard;
+  const ShardIdFull shard_;
   ton::BlockId new_id;
-  bool busy{false};
+  bool busy_{false};
   bool before_split_{false};
   bool after_split_{false};
   bool after_merge_{false};
@@ -69,7 +69,7 @@ class Collator final : public td::actor::Actor {
   std::vector<Ref<ShardState>> prev_states;
   std::vector<Ref<BlockData>> prev_block_data;
   Ed25519_PublicKey created_by_;
-  Ref<ValidatorSet> validator_set;
+  Ref<ValidatorSet> validator_set_;
   td::actor::ActorId<ValidatorManager> manager;
   td::Timestamp timeout;
   td::Promise<BlockCandidate> main_promise;
@@ -90,13 +90,13 @@ class Collator final : public td::actor::Actor {
            td::Timestamp timeout, td::Promise<BlockCandidate> promise);
   ~Collator() override = default;
   bool is_busy() const {
-    return busy;
+    return busy_;
   }
   ShardId get_shard() const {
-    return shard.shard;
+    return shard_.shard;
   }
   WorkchainId workchain() const {
-    return shard.workchain;
+    return shard_.workchain;
   }
   static constexpr td::uint32 priority() {
     return 2;
@@ -232,6 +232,7 @@ class Collator final : public td::actor::Actor {
   bool create_ticktock_transactions(int mask);
   bool create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask);
   Ref<vm::Cell> create_ordinary_transaction(Ref<vm::Cell> msg_root);
+  bool check_cur_validator_set();
   bool unpack_last_mc_state();
   bool unpack_last_state();
   bool unpack_merge_last_state();
@@ -248,7 +249,7 @@ class Collator final : public td::actor::Actor {
   bool request_neighbor_msg_queues();
   void update_max_lt(ton::LogicalTime lt);
   bool is_masterchain() const {
-    return shard.is_masterchain();
+    return shard_.is_masterchain();
   }
   bool is_our_address(Ref<vm::CellSlice> addr_ref) const;
   bool is_our_address(ton::AccountIdPrefixFull addr_prefix) const;
@@ -58,25 +58,25 @@ Collator::Collator(ShardIdFull shard, UnixTime min_ts, BlockIdExt min_masterchai
                    std::vector<BlockIdExt> prev, td::Ref<ValidatorSet> validator_set, Ed25519_PublicKey collator_id,
                    td::actor::ActorId<ValidatorManager> manager, td::Timestamp timeout,
                    td::Promise<BlockCandidate> promise)
-    : shard(shard)
+    : shard_(shard)
     , min_ts(min_ts)
     , min_mc_block_id{min_masterchain_block_id}
     , prev_blocks(std::move(prev))
     , created_by_(collator_id)
-    , validator_set(std::move(validator_set))
+    , validator_set_(std::move(validator_set))
     , manager(manager)
     , timeout(timeout)
     , main_promise(std::move(promise)) {
 }
 
 void Collator::start_up() {
-  LOG(DEBUG) << "Collator for shard " << shard.to_str() << " started";
+  LOG(DEBUG) << "Collator for shard " << shard_.to_str() << " started";
   LOG(DEBUG) << "Previous block #1 is " << prev_blocks.at(0).to_str();
   if (prev_blocks.size() > 1) {
     LOG(DEBUG) << "Previous block #2 is " << prev_blocks.at(1).to_str();
   }
   // 1. check validity of parameters, especially prev_blocks, shard and min_mc_block_id
-  if (shard.workchain != ton::masterchainId && shard.workchain != ton::basechainId) {
+  if (workchain() != ton::masterchainId && workchain() != ton::basechainId) {
     fatal_error(-667, "can create block candidates only for masterchain (-1) and base workchain (0)");
     return;
   }
@@ -84,16 +84,16 @@ void Collator::start_up() {
     fatal_error(-666, "collator is busy creating another block candidate");
     return;
   }
-  if (!shard.is_valid_ext()) {
+  if (!shard_.is_valid_ext()) {
     fatal_error(-666, "requested to generate a block for an invalid shard");
     return;
   }
-  td::uint64 x = td::lower_bit64(shard.shard);
+  td::uint64 x = td::lower_bit64(get_shard());
   if (x < 8) {
     fatal_error(-666, "cannot split a shard more than 60 times");
     return;
   }
-  if (is_masterchain() && !shard.is_masterchain_ext()) {
+  if (is_masterchain() && !shard_.is_masterchain_ext()) {
     fatal_error(-666, "sub-shards cannot exist in the masterchain");
     return;
   }
@@ -114,8 +114,8 @@ void Collator::start_up() {
       fatal_error(-666, "cannot merge shards in masterchain");
       return;
     }
-    if (!(shard_is_parent(shard, ShardIdFull(prev_blocks[0])) && shard_is_parent(shard, ShardIdFull(prev_blocks[1])) &&
-          prev_blocks[0].id.shard < prev_blocks[1].id.shard)) {
+    if (!(shard_is_parent(shard_, ShardIdFull(prev_blocks[0])) &&
+          shard_is_parent(shard_, ShardIdFull(prev_blocks[1])) && prev_blocks[0].id.shard < prev_blocks[1].id.shard)) {
       fatal_error(
           -666, "the two previous blocks for a merge operation are not siblings or are not children of current shard");
       return;
@@ -127,7 +127,7 @@ void Collator::start_up() {
       }
     }
     after_merge_ = true;
-    LOG(INFO) << "AFTER_MERGE set for the new block of " << shard.to_str();
+    LOG(INFO) << "AFTER_MERGE set for the new block of " << shard_.to_str();
   } else {
     CHECK(prev_blocks.size() == 1);
     // creating next block
@@ -135,12 +135,12 @@ void Collator::start_up() {
       fatal_error(-666, "previous block does not have a valid id");
       return;
     }
-    if (ShardIdFull(prev_blocks[0]) != shard) {
+    if (ShardIdFull(prev_blocks[0]) != shard_) {
       after_split_ = true;
-      right_child_ = ton::is_right_child(shard);
-      LOG(INFO) << "AFTER_SPLIT set for the new block of " << shard.to_str() << " (generating "
+      right_child_ = ton::is_right_child(shard_);
+      LOG(INFO) << "AFTER_SPLIT set for the new block of " << shard_.to_str() << " (generating "
                 << (right_child_ ? "right" : "left") << " child)";
-      if (!shard_is_parent(ShardIdFull(prev_blocks[0]), shard)) {
+      if (!shard_is_parent(ShardIdFull(prev_blocks[0]), shard_)) {
         fatal_error(-666, "previous block does not belong to the shard we are generating a new block for");
         return;
       }
@@ -156,7 +156,7 @@ void Collator::start_up() {
       return;
     }
   }
-  busy = true;
+  busy_ = true;
   step = 1;
   if (!is_masterchain()) {
     // 2. learn latest masterchain state and block id
@@ -198,7 +198,7 @@ void Collator::start_up() {
   // 4. load external messages
   LOG(DEBUG) << "sending get_external_messages() query to Manager";
   ++pending;
-  td::actor::send_closure_later(manager, &ValidatorManager::get_external_messages, shard,
+  td::actor::send_closure_later(manager, &ValidatorManager::get_external_messages, shard_,
                                 [self = get_self()](td::Result<std::vector<Ref<ExtMessage>>> res) -> void {
                                   LOG(DEBUG) << "got answer to get_external_messages() query";
                                   td::actor::send_closure_later(std::move(self), &Collator::after_get_external_messages,
@@ -248,10 +248,10 @@ std::string show_shard(const ton::ShardIdFull blk_id) {
 
 bool Collator::fatal_error(td::Status error) {
   error.ensure_error();
-  LOG(ERROR) << "cannot generate block candidate for " << show_shard(shard) << " : " << error.to_string();
-  if (busy) {
+  LOG(ERROR) << "cannot generate block candidate for " << show_shard(shard_) << " : " << error.to_string();
+  if (busy_) {
     main_promise(std::move(error));
-    busy = false;
+    busy_ = false;
   }
   stop();
   return false;
@@ -548,11 +548,33 @@ bool Collator::unpack_last_mc_state() {
   return true;
 }
 
+bool Collator::check_cur_validator_set() {
+  CatchainSeqno cc_seqno = 0;
+  auto nodes = config_->compute_validator_set_cc(shard_, now_, &cc_seqno);
+  if (nodes.empty()) {
+    return fatal_error("cannot compute validator set for shard "s + shard_.to_str() + " from old masterchain state");
+  }
+  std::vector<ValidatorDescr> export_nodes;
+  if (validator_set_.not_null()) {
+    if (validator_set_->get_catchain_seqno() != cc_seqno) {
+      return fatal_error(PSTRING() << "current validator set catchain seqno mismatch: this validator set has cc_seqno="
+                                   << validator_set_->get_catchain_seqno() << ", only validator set with cc_seqno="
+                                   << cc_seqno << " is entitled to create block in shardchain " << shard_.to_str());
+    }
+    export_nodes = validator_set_->export_vector();
+  }
+  if (export_nodes != nodes /* && !is_fake_ */) {
+    return fatal_error(
+        "current validator set mismatch: this validator set is not entitled to create block in shardchain "s +
+        shard_.to_str());
+  }
+  return true;
+}
+
 bool Collator::request_neighbor_msg_queues() {
   assert(config_ && shard_conf_);
-  auto neighbor_list = shard_conf_->get_neighbor_shard_hash_ids(shard);
-  LOG(DEBUG) << "got a preliminary list of " << neighbor_list.size() << " neighbors for "
-             << block::ShardId{shard}.to_str();
+  auto neighbor_list = shard_conf_->get_neighbor_shard_hash_ids(shard_);
+  LOG(DEBUG) << "got a preliminary list of " << neighbor_list.size() << " neighbors for " << shard_.to_str();
   for (ton::BlockId blk_id : neighbor_list) {
     auto shard_ptr = shard_conf_->get_shard_hash(ton::ShardIdFull(blk_id));
     if (shard_ptr.is_null()) {
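The block above is the validator-bugfix half of the commit: before building a candidate, the collator recomputes the validator set for its shard from the masterchain state and refuses to proceed unless its own set matches by catchain seqno and by membership. Below is a hedged, self-contained distillation of that rule with simplified stand-in types; none of these names exist in the codebase.

#include <string>
#include <vector>

// Simplified stand-in for ValidatorSet / ValidatorDescr, for illustration only.
struct SimpleValidatorSet {
  unsigned catchain_seqno = 0;
  std::vector<std::string> members;  // stand-in for the exported ValidatorDescr vector
};

// A candidate may only be produced when the collator's set matches the set computed
// from the current masterchain state, both by catchain seqno and by membership.
bool may_create_block(const SimpleValidatorSet &ours, const SimpleValidatorSet &expected) {
  return ours.catchain_seqno == expected.catchain_seqno && ours.members == expected.members;
}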
@@ -711,9 +733,9 @@ bool Collator::unpack_one_last_state(block::ShardState& ss, BlockIdExt blkid, Re
 }
 
 bool Collator::split_last_state(block::ShardState& ss) {
-  LOG(INFO) << "Splitting previous state " << ss.id_.to_str() << " to subshard " << shard.to_str();
+  LOG(INFO) << "Splitting previous state " << ss.id_.to_str() << " to subshard " << shard_.to_str();
   CHECK(after_split_);
-  auto sib_shard = ton::shard_sibling(shard);
+  auto sib_shard = ton::shard_sibling(shard_);
   auto res1 = ss.compute_split_out_msg_queue(sib_shard);
   if (res1.is_error()) {
     return fatal_error(res1.move_as_error());
@@ -724,7 +746,7 @@ bool Collator::split_last_state(block::ShardState& ss) {
     return fatal_error(res2.move_as_error());
   }
   sibling_processed_upto_ = res2.move_as_ok();
-  auto res3 = ss.split(shard);
+  auto res3 = ss.split(shard_);
   if (res3.is_error()) {
     return fatal_error(std::move(res3));
   }
@@ -763,10 +785,10 @@ bool Collator::add_trivial_neighbor_after_merge() {
   std::size_t n = neighbors_.size();
   for (std::size_t i = 0; i < n; i++) {
     auto& nb = neighbors_.at(i);
-    if (ton::shard_intersects(nb.shard(), shard)) {
+    if (ton::shard_intersects(nb.shard(), shard_)) {
       ++found;
-      LOG(DEBUG) << "neighbor #" << i << " : " << nb.blk_.to_str() << " intersects our shard " << shard.to_str();
-      if (!ton::shard_is_parent(shard, nb.shard()) || found > 2) {
+      LOG(DEBUG) << "neighbor #" << i << " : " << nb.blk_.to_str() << " intersects our shard " << shard_.to_str();
+      if (!ton::shard_is_parent(shard_, nb.shard()) || found > 2) {
         return fatal_error("impossible shard configuration in add_trivial_neighbor_after_merge()");
       }
       auto prev_shard = prev_blocks.at(found - 1).shard_full();
@@ -777,7 +799,7 @@ bool Collator::add_trivial_neighbor_after_merge() {
       if (found == 1) {
         nb.set_queue_root(out_msg_queue_->get_root_cell());
         nb.processed_upto = processed_upto_;
-        nb.blk_.id.shard = shard.shard;
+        nb.blk_.id.shard = get_shard();
         LOG(DEBUG) << "adjusted neighbor #" << i << " : " << nb.blk_.to_str()
                    << " with shard expansion (immediate after-merge adjustment)";
       } else {
@@ -828,11 +850,11 @@ bool Collator::add_trivial_neighbor() {
   std::size_t n = neighbors_.size();
   for (std::size_t i = 0; i < n; i++) {
     auto& nb = neighbors_.at(i);
-    if (ton::shard_intersects(nb.shard(), shard)) {
+    if (ton::shard_intersects(nb.shard(), shard_)) {
       ++found;
-      LOG(DEBUG) << "neighbor #" << i << " : " << nb.blk_.to_str() << " intersects our shard " << shard.to_str();
+      LOG(DEBUG) << "neighbor #" << i << " : " << nb.blk_.to_str() << " intersects our shard " << shard_.to_str();
       if (nb.shard() == prev_shard) {
-        if (prev_shard == shard) {
+        if (prev_shard == shard_) {
           // case 1. Normal.
           CHECK(found == 1);
           nb = *descr_ref;
@@ -840,7 +862,7 @@ bool Collator::add_trivial_neighbor() {
           nb.processed_upto = processed_upto_;
           LOG(DEBUG) << "adjusted neighbor #" << i << " : " << nb.blk_.to_str() << " (simple replacement)";
           cs = 1;
-        } else if (ton::shard_is_parent(nb.shard(), shard)) {
+        } else if (ton::shard_is_parent(nb.shard(), shard_)) {
           // case 2. Immediate after-split.
           CHECK(found == 1);
           CHECK(after_split_);
@@ -850,20 +872,20 @@ bool Collator::add_trivial_neighbor() {
           auto& nb2 = neighbors_.at(i);
           nb2.set_queue_root(sibling_out_msg_queue_->get_root_cell());
           nb2.processed_upto = sibling_processed_upto_;
-          nb2.blk_.id.shard = ton::shard_sibling(shard.shard);
+          nb2.blk_.id.shard = ton::shard_sibling(get_shard());
           LOG(DEBUG) << "adjusted neighbor #" << i << " : " << nb2.blk_.to_str()
                      << " with shard shrinking to our sibling (immediate after-split adjustment)";
           auto& nb1 = neighbors_.at(n);
           nb1.set_queue_root(out_msg_queue_->get_root_cell());
           nb1.processed_upto = processed_upto_;
-          nb1.blk_.id.shard = shard.shard;
+          nb1.blk_.id.shard = get_shard();
           LOG(DEBUG) << "created neighbor #" << n << " : " << nb1.blk_.to_str()
                      << " with shard shrinking to our (immediate after-split adjustment)";
           cs = 2;
         } else {
           return fatal_error("impossible shard configuration in add_trivial_neighbor()");
         }
-      } else if (ton::shard_is_parent(nb.shard(), shard) && shard == prev_shard) {
+      } else if (ton::shard_is_parent(nb.shard(), shard_) && shard_ == prev_shard) {
         // case 3. Continued after-split
         CHECK(found == 1);
         CHECK(!after_split_);
@@ -871,14 +893,14 @@ bool Collator::add_trivial_neighbor() {
         CHECK(!sibling_processed_upto_);
         neighbors_.emplace_back(*descr_ref);
         auto& nb2 = neighbors_.at(i);
-        auto sib_shard = ton::shard_sibling(shard);
+        auto sib_shard = ton::shard_sibling(shard_);
         // compute the part of virtual sibling's OutMsgQueue with destinations in our shard
         sibling_out_msg_queue_ =
             std::make_unique<vm::AugmentedDictionary>(nb2.outmsg_root, 352, block::tlb::aug_OutMsgQueue);
         td::BitArray<96> pfx;
-        pfx.bits().store_int(shard.workchain, 32);
-        (pfx.bits() + 32).store_uint(shard.shard, 64);
-        int l = ton::shard_prefix_length(shard);
+        pfx.bits().store_int(workchain(), 32);
+        (pfx.bits() + 32).store_uint(get_shard(), 64);
+        int l = ton::shard_prefix_length(shard_);
         CHECK(sibling_out_msg_queue_->cut_prefix_subdict(pfx.bits(), 32 + l));
         int res2 = block::filter_out_msg_queue(*sibling_out_msg_queue_, nb2.shard(), sib_shard);
         if (res2 < 0) {
@@ -888,7 +910,7 @@ bool Collator::add_trivial_neighbor() {
         if (!nb2.processed_upto->split(sib_shard)) {
           return fatal_error("error splitting ProcessedUpto for our virtual sibling");
         }
-        nb2.blk_.id.shard = ton::shard_sibling(shard.shard);
+        nb2.blk_.id.shard = ton::shard_sibling(get_shard());
         LOG(DEBUG) << "adjusted neighbor #" << i << " : " << nb2.blk_.to_str()
                    << " with shard shrinking to our sibling (continued after-split adjustment)";
         auto& nb1 = neighbors_.at(n);
@@ -897,7 +919,7 @@ bool Collator::add_trivial_neighbor() {
         LOG(DEBUG) << "created neighbor #" << n << " : " << nb1.blk_.to_str()
                    << " from our preceding state (continued after-split adjustment)";
         cs = 3;
-      } else if (ton::shard_is_parent(shard, nb.shard()) && shard == prev_shard) {
+      } else if (ton::shard_is_parent(shard_, nb.shard()) && shard_ == prev_shard) {
         // case 4. Continued after-merge.
         if (found == 1) {
           cs = 4;
@@ -946,7 +968,7 @@ bool Collator::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev
 
 bool Collator::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev) {
   if (listed != prev) {
-    return fatal_error(PSTRING() << "cannot generate shardchain block for shard " << shard.to_str()
+    return fatal_error(PSTRING() << "cannot generate shardchain block for shard " << shard_.to_str()
                                  << " after previous block " << prev.to_str()
                                  << " because masterchain configuration expects another previous block "
                                  << listed.to_str() << " and we are immediately after a split/merge event");
@@ -982,28 +1004,28 @@ bool Collator::check_this_shard_mc_info() {
                                  << (prev_blocks.size() ? prev_blocks[0].to_str() : "(null)")
                                  << " because no shard for this workchain is declared yet");
   }
-  auto left = config_->get_shard_hash(shard - 1, false);
+  auto left = config_->get_shard_hash(shard_ - 1, false);
   if (left.is_null()) {
-    return fatal_error(PSTRING() << "cannot create new block for shard " << shard.to_str()
+    return fatal_error(PSTRING() << "cannot create new block for shard " << shard_.to_str()
                                  << " because there is no similar shard in existing masterchain configuration");
   }
-  if (left->shard() == shard) {
+  if (left->shard() == shard_) {
     // no split/merge
     if (after_merge_ || after_split_) {
       return fatal_error(
-          PSTRING() << "cannot generate new shardchain block for " << shard.to_str()
+          PSTRING() << "cannot generate new shardchain block for " << shard_.to_str()
                     << " after a supposed split or merge event because this event is not reflected in the masterchain");
     }
     if (!check_prev_block(left->blk_, prev_blocks[0])) {
       return false;
     }
     if (left->before_split_) {
-      return fatal_error(PSTRING() << "cannot generate new unsplit shardchain block for " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot generate new unsplit shardchain block for " << shard_.to_str()
                                    << " after previous block " << left->blk_.to_str() << " with before_split set");
     }
-    auto sib = config_->get_shard_hash(shard_sibling(shard));
+    auto sib = config_->get_shard_hash(shard_sibling(shard_));
     if (left->before_merge_ && sib->before_merge_) {
-      return fatal_error(PSTRING() << "cannot generate new unmerged shardchain block for " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot generate new unmerged shardchain block for " << shard_.to_str()
                                    << " after both " << left->blk_.to_str() << " and " << sib->blk_.to_str()
                                    << " set before_merge flags");
     }
@@ -1012,35 +1034,35 @@ bool Collator::check_this_shard_mc_info() {
       if (shard_splitting_enabled && tmp_now >= left->fsm_utime() && tmp_now + 13 < left->fsm_utime_end()) {
         now_upper_limit_ = left->fsm_utime_end() - 11;  // ultimate value of now_ must be at most now_upper_limit_
         before_split_ = true;
-        LOG(INFO) << "BEFORE_SPLIT set for the new block of shard " << shard.to_str();
+        LOG(INFO) << "BEFORE_SPLIT set for the new block of shard " << shard_.to_str();
       }
     }
-  } else if (shard_is_parent(shard, left->shard())) {
+  } else if (shard_is_parent(shard_, left->shard())) {
     // after merge
     if (!left->before_merge_) {
-      return fatal_error(PSTRING() << "cannot create new merged block for shard " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot create new merged block for shard " << shard_.to_str()
                                    << " because its left ancestor " << left->blk_.to_str()
                                    << " has no before_merge flag");
     }
-    auto right = config_->get_shard_hash(shard + 1, false);
+    auto right = config_->get_shard_hash(shard_ + 1, false);
     if (right.is_null()) {
       return fatal_error(
           PSTRING()
-          << "cannot create new block for shard " << shard.to_str()
+          << "cannot create new block for shard " << shard_.to_str()
           << " after a preceding merge because there is no right ancestor shard in existing masterchain configuration");
     }
-    if (!shard_is_parent(shard, right->shard())) {
-      return fatal_error(PSTRING() << "cannot create new block for shard " << shard.to_str()
+    if (!shard_is_parent(shard_, right->shard())) {
+      return fatal_error(PSTRING() << "cannot create new block for shard " << shard_.to_str()
                                    << " after a preceding merge because its right ancestor appears to be "
                                    << right->blk_.to_str());
    }
    if (!right->before_merge_) {
-      return fatal_error(PSTRING() << "cannot create new merged block for shard " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot create new merged block for shard " << shard_.to_str()
                                    << " because its right ancestor " << right->blk_.to_str()
                                    << " has no before_merge flag");
    }
    if (after_split_) {
-      return fatal_error(PSTRING() << "cannot create new block for shard " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot create new block for shard " << shard_.to_str()
                                    << " after a purported split because existing shard configuration suggests a merge");
    } else if (after_merge_) {
      if (!(check_prev_block_exact(left->blk_, prev_blocks[0]) &&
@@ -1050,27 +1072,27 @@ bool Collator::check_this_shard_mc_info() {
     } else {
       auto cseqno = std::max(left->seqno(), right->seqno());
       if (prev_blocks[0].seqno() <= cseqno) {
-        return fatal_error(PSTRING() << "cannot create new block for shard " << shard.to_str()
+        return fatal_error(PSTRING() << "cannot create new block for shard " << shard_.to_str()
                                      << " after previous block " << prev_blocks[0].to_str()
                                      << " because masterchain contains newer possible ancestors " << left->blk_.to_str()
                                      << " and " << right->blk_.to_str());
       }
       if (prev_blocks[0].seqno() >= cseqno + 8) {
         return fatal_error(
-            PSTRING() << "cannot create new block for shard " << shard.to_str() << " after previous block "
+            PSTRING() << "cannot create new block for shard " << shard_.to_str() << " after previous block "
                       << prev_blocks[0].to_str()
                       << " because this would lead to an unregistered chain of length > 8 (masterchain contains only "
                       << left->blk_.to_str() << " and " << right->blk_.to_str() << ")");
       }
     }
-  } else if (shard_is_parent(left->shard(), shard)) {
+  } else if (shard_is_parent(left->shard(), shard_)) {
     // after split
     if (!left->before_split_) {
-      return fatal_error(PSTRING() << "cannot generate new split shardchain block for " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot generate new split shardchain block for " << shard_.to_str()
                                    << " after previous block " << left->blk_.to_str() << " without before_split");
     }
     if (after_merge_) {
-      return fatal_error(PSTRING() << "cannot create new block for shard " << shard.to_str()
+      return fatal_error(PSTRING() << "cannot create new block for shard " << shard_.to_str()
                                    << " after a purported merge because existing shard configuration suggests a split");
     } else if (after_split_) {
       if (!(check_prev_block_exact(left->blk_, prev_blocks[0]))) {
@@ -1083,7 +1105,7 @@ bool Collator::check_this_shard_mc_info() {
     }
   } else {
     return fatal_error(PSTRING() << "masterchain configuration contains only block " << left->blk_.to_str()
-                                 << " which belongs to a different shard from ours " << shard.to_str());
+                                 << " which belongs to a different shard from ours " << shard_.to_str());
   }
   return true;
 }
@@ -1109,7 +1131,7 @@ bool Collator::do_preinit() {
     last_block_seqno = prev_blocks[1].seqno();
   }
   new_block_seqno = last_block_seqno + 1;
-  new_id = ton::BlockId{shard, new_block_seqno};
+  new_id = ton::BlockId{shard_, new_block_seqno};
   CHECK(!config_);
   CHECK(mc_state_root.not_null());
   LOG(INFO) << "unpacking most recent masterchain state";
@ -1123,6 +1145,9 @@ bool Collator::do_preinit() {
|
||||||
if (!is_masterchain() && !check_this_shard_mc_info()) {
|
if (!is_masterchain() && !check_this_shard_mc_info()) {
|
||||||
return fatal_error("fatal error while checking masterchain configuration of the current shard");
|
return fatal_error("fatal error while checking masterchain configuration of the current shard");
|
||||||
}
|
}
|
||||||
|
if (!check_cur_validator_set()) {
|
||||||
|
return fatal_error("this validator set is not entitled to create a block for this shardchain");
|
||||||
|
}
|
||||||
CHECK(!prev_mc_block_seqno || mc_block_root.not_null());
|
CHECK(!prev_mc_block_seqno || mc_block_root.not_null());
|
||||||
if (!unpack_last_state()) {
|
if (!unpack_last_state()) {
|
||||||
return fatal_error("cannot unpack previous state of current shardchain");
|
return fatal_error("cannot unpack previous state of current shardchain");
|
||||||
|
@ -1642,7 +1667,7 @@ bool Collator::init_value_create() {
|
||||||
value_flow_.minted.set_zero();
|
value_flow_.minted.set_zero();
|
||||||
}
|
}
|
||||||
} else if (workchain() == basechainId) {
|
} else if (workchain() == basechainId) {
|
||||||
value_flow_.created = block::CurrencyCollection{basechain_create_fee_ >> ton::shard_prefix_length(shard)};
|
value_flow_.created = block::CurrencyCollection{basechain_create_fee_ >> ton::shard_prefix_length(shard_)};
|
||||||
}
|
}
|
||||||
value_flow_.fees_collected += value_flow_.created;
|
value_flow_.fees_collected += value_flow_.created;
|
||||||
return true;
|
return true;
|
||||||
|
@ -1672,7 +1697,7 @@ bool Collator::do_collate() {
|
||||||
// 1.3. create OutputQueueMerger from adjusted neighbors
|
// 1.3. create OutputQueueMerger from adjusted neighbors
|
||||||
CHECK(!nb_out_msgs_);
|
CHECK(!nb_out_msgs_);
|
||||||
LOG(DEBUG) << "creating OutputQueueMerger";
|
LOG(DEBUG) << "creating OutputQueueMerger";
|
||||||
nb_out_msgs_ = std::make_unique<block::OutputQueueMerger>(shard, neighbors_);
|
nb_out_msgs_ = std::make_unique<block::OutputQueueMerger>(shard_, neighbors_);
|
||||||
// 1.4. compute created / minted / recovered
|
// 1.4. compute created / minted / recovered
|
||||||
if (!init_value_create()) {
|
if (!init_value_create()) {
|
||||||
return fatal_error("cannot compute the value to be created / minted / recovered");
|
return fatal_error("cannot compute the value to be created / minted / recovered");
|
||||||
|
@ -1866,7 +1891,7 @@ std::unique_ptr<block::Account> Collator::make_account_from(td::ConstBitPtr addr
|
||||||
if (account.is_null() && !force_create) {
|
if (account.is_null() && !force_create) {
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
auto ptr = std::make_unique<block::Account>(shard.workchain, addr);
|
auto ptr = std::make_unique<block::Account>(workchain(), addr);
|
||||||
if (account.is_null()) {
|
if (account.is_null()) {
|
||||||
ptr->created = true;
|
ptr->created = true;
|
||||||
if (!ptr->init_new(now_)) {
|
if (!ptr->init_new(now_)) {
|
||||||
|
@ -1900,8 +1925,9 @@ td::Result<block::Account*> Collator::make_account(td::ConstBitPtr addr, bool fo
|
||||||
if (!new_acc) {
|
if (!new_acc) {
|
||||||
return td::Status::Error(PSTRING() << "cannot load account " << addr.to_hex(256) << " from previous state");
|
return td::Status::Error(PSTRING() << "cannot load account " << addr.to_hex(256) << " from previous state");
|
||||||
}
|
}
|
||||||
if (!new_acc->belongs_to_shard(shard)) {
|
if (!new_acc->belongs_to_shard(shard_)) {
|
||||||
return td::Status::Error(PSTRING() << "account " << addr.to_hex(256) << " does not really belong to current shard");
|
return td::Status::Error(PSTRING() << "account " << addr.to_hex(256) << " does not really belong to current shard "
|
||||||
|
<< shard_.to_str());
|
||||||
}
|
}
|
||||||
auto ins = accounts.emplace(addr, std::move(new_acc));
|
auto ins = accounts.emplace(addr, std::move(new_acc));
|
||||||
if (!ins.second) {
|
if (!ins.second) {
|
||||||
|
@ -1920,7 +1946,7 @@ bool Collator::combine_account_transactions() {
|
||||||
// have transactions for this account
|
// have transactions for this account
|
||||||
vm::CellBuilder cb;
|
vm::CellBuilder cb;
|
||||||
if (!acc.create_account_block(cb)) {
|
if (!acc.create_account_block(cb)) {
|
||||||
return fatal_error(std::string{"cannot create AccountBlock for account "} + z.first.to_hex());
|
return fatal_error("cannot create AccountBlock for account "s + z.first.to_hex());
|
||||||
}
|
}
|
||||||
auto cell = cb.finalize();
|
auto cell = cb.finalize();
|
||||||
auto csr = vm::load_cell_slice_ref(cell);
|
auto csr = vm::load_cell_slice_ref(cell);
|
||||||
|
@ -2084,8 +2110,8 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t
|
||||||
req_start_lt = std::max(req_start_lt, start_lt + 1);
|
req_start_lt = std::max(req_start_lt, start_lt + 1);
|
||||||
if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) {
|
if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) {
|
||||||
return fatal_error(td::Status::Error(-666, PSTRING()
|
return fatal_error(td::Status::Error(-666, PSTRING()
|
||||||
<< "last transaction time in the state of account "
|
<< "last transaction time in the state of account " << workchain()
|
||||||
<< shard.workchain << ":" << smc_addr.to_hex() << " is too large"));
|
<< ":" << smc_addr.to_hex() << " is too large"));
|
||||||
}
|
}
|
||||||
std::unique_ptr<block::Transaction> trans = std::make_unique<block::Transaction>(
|
std::unique_ptr<block::Transaction> trans = std::make_unique<block::Transaction>(
|
||||||
*acc, mask == 2 ? block::Transaction::tr_tick : block::Transaction::tr_tock, req_start_lt, now_);
|
*acc, mask == 2 ? block::Transaction::tr_tick : block::Transaction::tr_tock, req_start_lt, now_);
|
||||||
|
@ -2154,7 +2180,7 @@ Ref<vm::Cell> Collator::create_ordinary_transaction(Ref<vm::Cell> msg_root) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
ton::WorkchainId wc;
|
ton::WorkchainId wc;
|
||||||
if (!block::tlb::t_MsgAddressInt.extract_std_address(dest, wc, addr) || wc != shard.workchain) {
|
if (!block::tlb::t_MsgAddressInt.extract_std_address(dest, wc, addr) || wc != workchain()) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
LOG(DEBUG) << "inbound message to our smart contract " << addr.to_hex();
|
LOG(DEBUG) << "inbound message to our smart contract " << addr.to_hex();
|
||||||
|
@ -2166,7 +2192,7 @@ Ref<vm::Cell> Collator::create_ordinary_transaction(Ref<vm::Cell> msg_root) {
|
||||||
block::Account* acc = acc_res.move_as_ok();
|
block::Account* acc = acc_res.move_as_ok();
|
||||||
assert(acc);
|
assert(acc);
|
||||||
if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) {
|
if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) {
|
||||||
fatal_error(PSTRING() << "last transaction time in the state of account " << shard.workchain << ":" << addr.to_hex()
|
fatal_error(PSTRING() << "last transaction time in the state of account " << workchain() << ":" << addr.to_hex()
|
||||||
<< " is too large");
|
<< " is too large");
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
@ -2288,11 +2314,11 @@ bool Collator::is_our_address(Ref<vm::CellSlice> addr_ref) const {
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Collator::is_our_address(ton::AccountIdPrefixFull addr_pfx) const {
|
bool Collator::is_our_address(ton::AccountIdPrefixFull addr_pfx) const {
|
||||||
return ton::shard_contains(shard, addr_pfx);
|
return ton::shard_contains(shard_, addr_pfx);
|
||||||
}
|
}
|
||||||
|
|
||||||
bool Collator::is_our_address(const ton::StdSmcAddress& addr) const {
|
bool Collator::is_our_address(const ton::StdSmcAddress& addr) const {
|
||||||
return ton::shard_contains(shard.shard, addr);
|
return ton::shard_contains(get_shard(), addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1 = processed, 0 = enqueued, 3 = processed, all future messages must be enqueued
|
// 1 = processed, 0 = enqueued, 3 = processed, all future messages must be enqueued
|
||||||
|
@ -2410,7 +2436,7 @@ bool Collator::enqueue_transit_message(Ref<vm::Cell> msg, Ref<vm::Cell> old_msg_
|
||||||
LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256);
|
LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256);
|
||||||
bool requeue = is_our_address(prev_prefix);
|
bool requeue = is_our_address(prev_prefix);
|
||||||
// 1. perform hypercube routing
|
// 1. perform hypercube routing
|
||||||
auto route_info = block::perform_hypercube_routing(cur_prefix, dest_prefix, shard);
|
auto route_info = block::perform_hypercube_routing(cur_prefix, dest_prefix, shard_);
|
||||||
if ((unsigned)route_info.first > 96 || (unsigned)route_info.second > 96) {
|
if ((unsigned)route_info.first > 96 || (unsigned)route_info.second > 96) {
|
||||||
return fatal_error("cannot perform hypercube routing for a transit message");
|
return fatal_error("cannot perform hypercube routing for a transit message");
|
||||||
}
|
}
|
||||||
|
@ -2572,7 +2598,7 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
// 5.2. next_prefix must belong to our shard
|
// 5.2. next_prefix must belong to our shard
|
||||||
if (!ton::shard_contains(shard, next_prefix)) {
|
if (!ton::shard_contains(shard_, next_prefix)) {
|
||||||
LOG(ERROR) << "inbound internal message does not have next hop address in our shard";
|
LOG(ERROR) << "inbound internal message does not have next hop address in our shard";
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
@ -2594,8 +2620,8 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
|
||||||
}
|
}
|
||||||
// 6. check whether we have already processed this message before using ProcessedUpTo (processed_upto)
|
// 6. check whether we have already processed this message before using ProcessedUpTo (processed_upto)
|
||||||
// (then silently ignore this message; NB: it can be ours after merge)
|
// (then silently ignore this message; NB: it can be ours after merge)
|
||||||
bool our = ton::shard_contains(shard, cur_prefix);
|
bool our = ton::shard_contains(shard_, cur_prefix);
|
||||||
bool to_us = ton::shard_contains(shard, dest_prefix);
|
bool to_us = ton::shard_contains(shard_, dest_prefix);
|
||||||
|
|
||||||
block::EnqueuedMsgDescr enq_msg_descr{cur_prefix, next_prefix, info.created_lt, enqueued_lt,
|
block::EnqueuedMsgDescr enq_msg_descr{cur_prefix, next_prefix, info.created_lt, enqueued_lt,
|
||||||
env.msg->get_hash().bits()};
|
env.msg->get_hash().bits()};
|
||||||
|
@ -2734,7 +2760,7 @@ int Collator::process_external_message(Ref<vm::Cell> msg) {
|
||||||
// 1. create a Transaction processing this Message
|
// 1. create a Transaction processing this Message
|
||||||
auto trans_root = create_ordinary_transaction(msg);
|
auto trans_root = create_ordinary_transaction(msg);
|
||||||
if (trans_root.is_null()) {
|
if (trans_root.is_null()) {
|
||||||
if (busy) {
|
if (busy_) {
|
||||||
// transaction rejected by account
|
// transaction rejected by account
|
||||||
LOG(DEBUG) << "external message rejected by account, skipping";
|
LOG(DEBUG) << "external message rejected by account, skipping";
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -2846,7 +2872,7 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema
|
||||||
return fatal_error("cannot enqueue a new message because its destination shard is invalid");
|
return fatal_error("cannot enqueue a new message because its destination shard is invalid");
|
||||||
}
|
}
|
||||||
// 1. perform hypercube routing
|
// 1. perform hypercube routing
|
||||||
auto route_info = block::perform_hypercube_routing(src_prefix, dest_prefix, shard);
|
auto route_info = block::perform_hypercube_routing(src_prefix, dest_prefix, shard_);
|
||||||
if ((unsigned)route_info.first > 96 || (unsigned)route_info.second > 96) {
|
if ((unsigned)route_info.first > 96 || (unsigned)route_info.second > 96) {
|
||||||
return fatal_error("cannot perform hypercube routing for a new outbound message");
|
return fatal_error("cannot perform hypercube routing for a new outbound message");
|
||||||
}
|
}
|
||||||
|
@ -3155,10 +3181,10 @@ bool Collator::create_mc_state_extra() {
|
||||||
cc_updated = true;
|
cc_updated = true;
|
||||||
LOG(INFO) << "increased masterchain catchain seqno to " << val_info.catchain_seqno;
|
LOG(INFO) << "increased masterchain catchain seqno to " << val_info.catchain_seqno;
|
||||||
}
|
}
|
||||||
auto nodes = block::Config::do_compute_validator_set(ccvc, shard, *cur_validators, now_, val_info.catchain_seqno);
|
auto nodes = block::Config::do_compute_validator_set(ccvc, shard_, *cur_validators, now_, val_info.catchain_seqno);
|
||||||
LOG_CHECK(!nodes.empty()) << "validator node list in unpacked validator set is empty";
|
LOG_CHECK(!nodes.empty()) << "validator node list in unpacked validator set is empty";
|
||||||
|
|
||||||
auto vlist_hash = block::compute_validator_set_hash(/* val_info.catchain_seqno */ 0, shard, std::move(nodes));
|
auto vlist_hash = block::compute_validator_set_hash(/* val_info.catchain_seqno */ 0, shard_, std::move(nodes));
|
||||||
LOG(INFO) << "masterchain validator set hash changed from " << val_info.validator_list_hash_short << " to "
|
LOG(INFO) << "masterchain validator set hash changed from " << val_info.validator_list_hash_short << " to "
|
||||||
<< vlist_hash;
|
<< vlist_hash;
|
||||||
val_info.nx_cc_updated = cc_updated & update_shard_cc;
|
val_info.nx_cc_updated = cc_updated & update_shard_cc;
|
||||||
|
@ -3542,7 +3568,7 @@ bool Collator::create_shard_state() {
|
||||||
if (!(cb.store_long_bool(0x9023afe2, 32) // shard_state#9023afe2
|
if (!(cb.store_long_bool(0x9023afe2, 32) // shard_state#9023afe2
|
||||||
&& cb.store_long_bool(global_id_, 32) // global_id:int32
|
&& cb.store_long_bool(global_id_, 32) // global_id:int32
|
||||||
&& global_id_ // { global_id != 0 }
|
&& global_id_ // { global_id != 0 }
|
||||||
&& block::ShardId{shard}.serialize(cb) // shard_id:ShardIdent
|
&& block::ShardId{shard_}.serialize(cb) // shard_id:ShardIdent
|
||||||
&& cb.store_long_bool(new_block_seqno, 32) // seq_no:uint32
|
&& cb.store_long_bool(new_block_seqno, 32) // seq_no:uint32
|
||||||
&& cb.store_long_bool(vert_seqno_, 32) // vert_seq_no:#
|
&& cb.store_long_bool(vert_seqno_, 32) // vert_seq_no:#
|
||||||
&& cb.store_long_bool(now_, 32) // gen_utime:uint32
|
&& cb.store_long_bool(now_, 32) // gen_utime:uint32
|
||||||
|
@ -3668,8 +3694,8 @@ bool Collator::compute_total_balance() {
|
||||||
bool Collator::create_block_info(Ref<vm::Cell>& block_info) {
|
bool Collator::create_block_info(Ref<vm::Cell>& block_info) {
|
||||||
vm::CellBuilder cb, cb2;
|
vm::CellBuilder cb, cb2;
|
||||||
bool mc = is_masterchain();
|
bool mc = is_masterchain();
|
||||||
td::uint32 val_hash = validator_set->get_validator_set_hash();
|
td::uint32 val_hash = validator_set_->get_validator_set_hash();
|
||||||
CatchainSeqno cc_seqno = validator_set->get_catchain_seqno();
|
CatchainSeqno cc_seqno = validator_set_->get_catchain_seqno();
|
||||||
return cb.store_long_bool(0x9bc7a987, 32) // block_info#9bc7a987
|
return cb.store_long_bool(0x9bc7a987, 32) // block_info#9bc7a987
|
||||||
&& cb.store_long_bool(0, 32) // version:uint32
|
&& cb.store_long_bool(0, 32) // version:uint32
|
||||||
&& cb.store_bool_bool(!mc) // not_master:(## 1)
|
&& cb.store_bool_bool(!mc) // not_master:(## 1)
|
||||||
|
@ -3682,7 +3708,7 @@ bool Collator::create_block_info(Ref<vm::Cell>& block_info) {
|
||||||
&& cb.store_long_bool((int)report_version_, 9) // vert_seqno_incr:(## 1) flags:(## 8)
|
&& cb.store_long_bool((int)report_version_, 9) // vert_seqno_incr:(## 1) flags:(## 8)
|
||||||
&& cb.store_long_bool(new_block_seqno, 32) // seq_no:#
|
&& cb.store_long_bool(new_block_seqno, 32) // seq_no:#
|
||||||
&& cb.store_long_bool(vert_seqno_, 32) // vert_seq_no:#
|
&& cb.store_long_bool(vert_seqno_, 32) // vert_seq_no:#
|
||||||
&& block::ShardId{shard}.serialize(cb) // shard:ShardIdent
|
&& block::ShardId{shard_}.serialize(cb) // shard:ShardIdent
|
||||||
&& cb.store_long_bool(now_, 32) // gen_utime:uint32
|
&& cb.store_long_bool(now_, 32) // gen_utime:uint32
|
||||||
&& cb.store_long_bool(start_lt, 64) // start_lt:uint64
|
&& cb.store_long_bool(start_lt, 64) // start_lt:uint64
|
||||||
&& cb.store_long_bool(max_lt, 64) // end_lt:uint64
|
&& cb.store_long_bool(max_lt, 64) // end_lt:uint64
|
||||||
|
@ -3891,7 +3917,7 @@ bool Collator::create_block_candidate() {
|
||||||
// 3. create a BlockCandidate
|
// 3. create a BlockCandidate
|
||||||
block_candidate = std::make_unique<BlockCandidate>(
|
block_candidate = std::make_unique<BlockCandidate>(
|
||||||
created_by_,
|
created_by_,
|
||||||
ton::BlockIdExt{ton::BlockId{shard, new_block_seqno}, new_block->get_hash().bits(),
|
ton::BlockIdExt{ton::BlockId{shard_, new_block_seqno}, new_block->get_hash().bits(),
|
||||||
block::compute_file_hash(blk_slice.as_slice())},
|
block::compute_file_hash(blk_slice.as_slice())},
|
||||||
block::compute_file_hash(cdata_slice.as_slice()), blk_slice.clone(), cdata_slice.clone());
|
block::compute_file_hash(cdata_slice.as_slice()), blk_slice.clone(), cdata_slice.clone());
|
||||||
// 4. save block candidate
|
// 4. save block candidate
|
||||||
|
@ -3921,7 +3947,7 @@ void Collator::return_block_candidate(td::Result<td::Unit> saved) {
|
||||||
CHECK(block_candidate);
|
CHECK(block_candidate);
|
||||||
LOG(INFO) << "sending new BlockCandidate to Promise";
|
LOG(INFO) << "sending new BlockCandidate to Promise";
|
||||||
main_promise(block_candidate->clone());
|
main_promise(block_candidate->clone());
|
||||||
busy = false;
|
busy_ = false;
|
||||||
stop();
|
stop();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3965,7 +3991,7 @@ td::Result<bool> Collator::register_external_message_cell(Ref<vm::Cell> ext_msg,
|
||||||
return td::Status::Error("destination of an inbound external message is an invalid blockchain address");
|
return td::Status::Error("destination of an inbound external message is an invalid blockchain address");
|
||||||
}
|
}
|
||||||
// NB: previous checks are quite general and can be done at an outer level before multiplexing to correct Collator
|
// NB: previous checks are quite general and can be done at an outer level before multiplexing to correct Collator
|
||||||
if (!ton::shard_contains(shard, dest_prefix)) {
|
if (!ton::shard_contains(shard_, dest_prefix)) {
|
||||||
return td::Status::Error("inbound external message has destination address not in this shard");
|
return td::Status::Error("inbound external message has destination address not in this shard");
|
||||||
}
|
}
|
||||||
if (verbosity > 2) {
|
if (verbosity > 2) {
|
||||||
|
|
|
@@ -1737,14 +1737,20 @@ bool ValidateQuery::register_shard_block_creators(std::vector<td::Bits256> creat
   return true;
 }
 
+// similar to Collator::check_cur_validator_set()
 bool ValidateQuery::check_cur_validator_set() {
   CatchainSeqno cc_seqno = 0;
   auto nodes = config_->compute_validator_set_cc(shard_, now_, &cc_seqno);
   if (nodes.empty()) {
-    return reject_query("cannot compute masterchain validator set from old masterchain state");
+    return reject_query("cannot compute validator set for shard "s + shard_.to_str() + " from old masterchain state");
   }
   std::vector<ValidatorDescr> export_nodes;
   if (validator_set_.not_null()) {
+    if (validator_set_->get_catchain_seqno() != cc_seqno) {
+      return reject_query(PSTRING() << "current validator set catchain seqno mismatch: this validator set has cc_seqno="
+                                    << validator_set_->get_catchain_seqno() << ", only validator set with cc_seqno="
+                                    << cc_seqno << " is entitled to create block " << id_.to_str());
+    }
     export_nodes = validator_set_->export_vector();
   }
   if (export_nodes != nodes /* && !is_fake_ */) {

@@ -30,15 +30,16 @@ namespace validator {
 
 namespace fullnode {
 
-DownloadProof::DownloadProof(BlockIdExt block_id, bool allow_partial_proof, adnl::AdnlNodeIdShort local_id,
-                             overlay::OverlayIdShort overlay_id, adnl::AdnlNodeIdShort download_from,
-                             td::uint32 priority, td::Timestamp timeout,
+DownloadProof::DownloadProof(BlockIdExt block_id, bool allow_partial_proof, bool is_key_block,
+                             adnl::AdnlNodeIdShort local_id, overlay::OverlayIdShort overlay_id,
+                             adnl::AdnlNodeIdShort download_from, td::uint32 priority, td::Timestamp timeout,
                              td::actor::ActorId<ValidatorManagerInterface> validator_manager,
                              td::actor::ActorId<rldp::Rldp> rldp, td::actor::ActorId<overlay::Overlays> overlays,
                              td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<adnl::AdnlExtClient> client,
                              td::Promise<td::BufferSlice> promise)
     : block_id_(block_id)
     , allow_partial_proof_(allow_partial_proof)
+    , is_key_block_(is_key_block)
     , local_id_(local_id)
     , overlay_id_(overlay_id)
     , download_from_(download_from)
@@ -155,8 +156,14 @@ void DownloadProof::got_node_to_download(adnl::AdnlNodeIdShort node) {
     }
   });
 
-  auto query = create_serialize_tl_object<ton_api::tonNode_prepareBlockProof>(create_tl_block_id(block_id_),
-                                                                              allow_partial_proof_);
+  td::BufferSlice query;
+  if (!is_key_block_) {
+    query = create_serialize_tl_object<ton_api::tonNode_prepareBlockProof>(create_tl_block_id(block_id_),
+                                                                           allow_partial_proof_);
+  } else {
+    query = create_serialize_tl_object<ton_api::tonNode_prepareKeyBlockProof>(create_tl_block_id(block_id_),
+                                                                              allow_partial_proof_);
+  }
 
   if (client_.empty()) {
     td::actor::send_closure(overlays_, &overlay::Overlays::send_query, download_from_, local_id_, overlay_id_,
@@ -190,7 +197,12 @@ void DownloadProof::got_block_proof_description(td::BufferSlice proof_descriptio
     }
   });
 
-  auto query = create_serialize_tl_object<ton_api::tonNode_downloadBlockProof>(create_tl_block_id(block_id_));
+  td::BufferSlice query;
+  if (!is_key_block_) {
+    query = create_serialize_tl_object<ton_api::tonNode_downloadBlockProof>(create_tl_block_id(block_id_));
+  } else {
+    query = create_serialize_tl_object<ton_api::tonNode_downloadKeyBlockProof>(create_tl_block_id(block_id_));
+  }
   if (client_.empty()) {
     td::actor::send_closure(overlays_, &overlay::Overlays::send_query_via, download_from_, local_id_,
                             overlay_id_, "download block proof", std::move(P), td::Timestamp::in(3.0),
@@ -214,8 +226,14 @@ void DownloadProof::got_block_proof_description(td::BufferSlice proof_descriptio
     }
   });
 
-  auto query =
-      create_serialize_tl_object<ton_api::tonNode_downloadBlockProofLink>(create_tl_block_id(block_id_));
+  td::BufferSlice query;
+  if (!is_key_block_) {
+    query =
+        create_serialize_tl_object<ton_api::tonNode_downloadBlockProofLink>(create_tl_block_id(block_id_));
+  } else {
+    query =
+        create_serialize_tl_object<ton_api::tonNode_downloadKeyBlockProofLink>(create_tl_block_id(block_id_));
+  }
   if (client_.empty()) {
     td::actor::send_closure(overlays_, &overlay::Overlays::send_query_via, download_from_, local_id_,
                             overlay_id_, "download block proof link", std::move(P), td::Timestamp::in(3.0),

@@ -32,7 +32,7 @@ namespace fullnode {
 
 class DownloadProof : public td::actor::Actor {
  public:
-  DownloadProof(BlockIdExt block_id, bool allow_partial_proof, adnl::AdnlNodeIdShort local_id,
+  DownloadProof(BlockIdExt block_id, bool allow_partial_proof, bool is_key_block, adnl::AdnlNodeIdShort local_id,
                 overlay::OverlayIdShort overlay_id, adnl::AdnlNodeIdShort download_from, td::uint32 priority,
                 td::Timestamp timeout, td::actor::ActorId<ValidatorManagerInterface> validator_manager,
                 td::actor::ActorId<rldp::Rldp> rldp, td::actor::ActorId<overlay::Overlays> overlays,
@@ -54,6 +54,7 @@ class DownloadProof : public td::actor::Actor {
  private:
   BlockIdExt block_id_;
   bool allow_partial_proof_;
+  bool is_key_block_;
   adnl::AdnlNodeIdShort local_id_;
   overlay::OverlayIdShort overlay_id_;
 

@@ -178,9 +178,9 @@ void GetNextKeyBlocks::download_next_proof() {
     }
   });
 
-  td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, false, local_id_, overlay_id_, download_from_,
-                                         priority_, timeout_, validator_manager_, rldp_, overlays_, adnl_, client_,
-                                         std::move(P))
+  td::actor::create_actor<DownloadProof>("downloadproofreq", block_id, false, true, local_id_, overlay_id_,
+                                         download_from_, priority_, timeout_, validator_manager_, rldp_, overlays_,
+                                         adnl_, client_, std::move(P))
       .release();
 }
 
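The GetNextKeyBlocks hunk above is the consumer of the new flag: it now spawns DownloadProof with is_key_block set to true, so proofs of old key blocks are requested through the dedicated tonNode.prepareKeyBlockProof / tonNode.downloadKeyBlockProof / tonNode.downloadKeyBlockProofLink queries instead of the ordinary block-proof queries. A minimal sketch of that selection logic follows, assuming the ton_api TL types from the schema above and the create_serialize_tl_object / create_tl_block_id helpers used in the diff are available; the free function and its name are illustrative only and are not part of the commit.

// Sketch only: mirrors the query selection added to DownloadProof in this commit.
// Assumed to live inside ton::validator::fullnode, next to the code shown in the diff.
static td::BufferSlice build_prepare_proof_query(BlockIdExt block_id, bool allow_partial_proof, bool is_key_block) {
  if (!is_key_block) {
    // ordinary block: keep using tonNode.prepareBlockProof
    return create_serialize_tl_object<ton_api::tonNode_prepareBlockProof>(create_tl_block_id(block_id),
                                                                          allow_partial_proof);
  }
  // key block: use the key-block variant introduced by this commit
  return create_serialize_tl_object<ton_api::tonNode_prepareKeyBlockProof>(create_tl_block_id(block_id),
                                                                           allow_partial_proof);
}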