Mirror of https://github.com/ton-blockchain/ton (synced 2025-03-09 15:40:10 +00:00)

Merge message dispatch queue (#1030)

* Deferred messages and msg metadata
* Store out msg queue size in state
* Add checks for queue processing
  1. Collator must process at least one message from AccountDispatchQueue (unless block is full)
  2. The first message from a transaction is not counted, it cannot be deferred (unless AccountDispatchQueue is not empty)
* Return msg metadata from LS in listBlockTransactions[Ext]
* Enable new features by capabilities
* Changes in deferred messages
* Process deferred messages via new_msgs in collator
* Rework setting deferred_lt, bring back check_message_processing_order, check order of deferred_lt in validator
* Use have_unprocessed_account_dispatch_queue_ in collator
* Fix setting transaction lt for deferred messages
* Fix lite-client compilation error
* Changes in process_dispatch_queue, rename deferred_lt -> emitted_lt
* Fix compilation error
* Use uint64 for msg queue size
* Add liteServer.getBlockOutMsgQueueSize
* Fix compilation error
* Fix typos in comments

Co-authored-by: SpyCheese <mikle98@yandex.ru>

Parent: 38fc1d5456
Commit: 0daee1d887
29 changed files with 1889 additions and 318 deletions

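The central idea of the change is that every queued message now carries an "emitted" logical time (emitted_lt for messages coming out of the dispatch queue, the message's created_lt otherwise) plus optional metadata. As a quick orientation before the diff, here is a minimal sketch of how a caller might read that value through the new helper; the names come from the diff below, but the wrapper itself and the include paths are assumptions, not part of the commit:

// Illustrative sketch only -- not part of the commit.
// Uses block::tlb::t_MsgEnvelope.get_emitted_lt, declared in the diff below.
#include "block/block-parse.h"   // assumed include path
#include "vm/cells/CellSlice.h"  // assumed include path

// Returns the LT under which a queued MsgEnvelope should be keyed:
// emitted_lt for msg_envelope_v2#5 (if present), otherwise the message's created_lt.
bool queue_key_lt(td::Ref<vm::Cell> msg_env, unsigned long long& lt) {
  return block::tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(std::move(msg_env)), lt);
}
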
@@ -813,19 +813,45 @@ int IntermediateAddress::get_size(const vm::CellSlice& cs) const {
 const IntermediateAddress t_IntermediateAddress;
 
 bool MsgEnvelope::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
-  return cs.fetch_ulong(4) == 4                                 // msg_envelope#4
-         && t_IntermediateAddress.validate_skip(ops, cs, weak)  // cur_addr:IntermediateAddress
-         && t_IntermediateAddress.validate_skip(ops, cs, weak)  // next_addr:IntermediateAddress
-         && t_Grams.validate_skip(ops, cs, weak)                // fwd_fee_remaining:Grams
-         && t_Ref_Message.validate_skip(ops, cs, weak);         // msg:^Message
+  switch (get_tag(cs)) {
+    case 4:
+      return cs.fetch_ulong(4) == 4                                 // msg_envelope#4
+             && t_IntermediateAddress.validate_skip(ops, cs, weak)  // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.validate_skip(ops, cs, weak)  // next_addr:IntermediateAddress
+             && t_Grams.validate_skip(ops, cs, weak)                // fwd_fee_remaining:Grams
+             && t_Ref_Message.validate_skip(ops, cs, weak);         // msg:^Message
+    case 5:
+      return cs.fetch_ulong(4) == 5                                 // msg_envelope_v2#5
+             && t_IntermediateAddress.validate_skip(ops, cs, weak)  // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.validate_skip(ops, cs, weak)  // next_addr:IntermediateAddress
+             && t_Grams.validate_skip(ops, cs, weak)                // fwd_fee_remaining:Grams
+             && t_Ref_Message.validate_skip(ops, cs, weak)          // msg:^Message
+             && Maybe<UInt>(64).validate_skip(ops, cs, weak)        // emitted_lt:(Maybe uint64)
+             && Maybe<gen::MsgMetadata>().validate_skip(ops, cs, weak);  // metadata:(Maybe MsgMetadata)
+    default:
+      return false;
+  }
 }
 
 bool MsgEnvelope::skip(vm::CellSlice& cs) const {
-  return cs.advance(4)                      // msg_envelope#4
-         && t_IntermediateAddress.skip(cs)  // cur_addr:IntermediateAddress
-         && t_IntermediateAddress.skip(cs)  // next_addr:IntermediateAddress
-         && t_Grams.skip(cs)                // fwd_fee_remaining:Grams
-         && t_Ref_Message.skip(cs);         // msg:^Message
+  switch (get_tag(cs)) {
+    case 4:
+      return cs.advance(4)                      // msg_envelope#4
+             && t_IntermediateAddress.skip(cs)  // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.skip(cs)  // next_addr:IntermediateAddress
+             && t_Grams.skip(cs)                // fwd_fee_remaining:Grams
+             && t_Ref_Message.skip(cs);         // msg:^Message
+    case 5:
+      return cs.advance(4)                      // msg_envelope_v2#5
+             && t_IntermediateAddress.skip(cs)  // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.skip(cs)  // next_addr:IntermediateAddress
+             && t_Grams.skip(cs)                // fwd_fee_remaining:Grams
+             && t_Ref_Message.skip(cs)          // msg:^Message
+             && Maybe<UInt>(64).skip(cs)        // emitted_lt:(Maybe uint64)
+             && Maybe<gen::MsgMetadata>().skip(cs);  // metadata:(Maybe MsgMetadata)
+    default:
+      return false;
+  }
 }
 
 bool MsgEnvelope::extract_fwd_fees_remaining(vm::CellSlice& cs) const {
@@ -833,34 +859,101 @@ bool MsgEnvelope::extract_fwd_fees_remaining(vm::CellSlice& cs) const {
 }
 
 bool MsgEnvelope::unpack(vm::CellSlice& cs, MsgEnvelope::Record& data) const {
-  return cs.fetch_ulong(4) == 4                               // msg_envelope#4
-         && t_IntermediateAddress.fetch_to(cs, data.cur_addr)   // cur_addr:IntermediateAddress
-         && t_IntermediateAddress.fetch_to(cs, data.next_addr)  // next_addr:IntermediateAddress
-         && t_Grams.fetch_to(cs, data.fwd_fee_remaining)        // fwd_fee_remaining:Grams
-         && cs.fetch_ref_to(data.msg);                          // msg:^Message
+  switch (get_tag(cs)) {
+    case 4:
+      return cs.fetch_ulong(4) == 4                                 // msg_envelope#4
+             && t_IntermediateAddress.fetch_to(cs, data.cur_addr)   // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.fetch_to(cs, data.next_addr)  // next_addr:IntermediateAddress
+             && t_Grams.fetch_to(cs, data.fwd_fee_remaining)        // fwd_fee_remaining:Grams
+             && cs.fetch_ref_to(data.msg);                          // msg:^Message
+    case 5:
+      return cs.fetch_ulong(4) == 5                                 // msg_envelope_v2#5
+             && t_IntermediateAddress.fetch_to(cs, data.cur_addr)   // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.fetch_to(cs, data.next_addr)  // next_addr:IntermediateAddress
+             && t_Grams.fetch_to(cs, data.fwd_fee_remaining)        // fwd_fee_remaining:Grams
+             && cs.fetch_ref_to(data.msg)                           // msg:^Message
+             && Maybe<UInt>(64).skip(cs)                            // emitted_lt:(Maybe uint64)
+             && Maybe<gen::MsgMetadata>().skip(cs);                 // metadata:(Maybe MsgMetadata)
+    default:
+      return false;
+  }
 }
 
 bool MsgEnvelope::unpack(vm::CellSlice& cs, MsgEnvelope::Record_std& data) const {
-  return cs.fetch_ulong(4) == 4                                       // msg_envelope#4
-         && t_IntermediateAddress.fetch_regular(cs, data.cur_addr)    // cur_addr:IntermediateAddress
-         && t_IntermediateAddress.fetch_regular(cs, data.next_addr)   // next_addr:IntermediateAddress
-         && t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining)    // fwd_fee_remaining:Grams
-         && cs.fetch_ref_to(data.msg);                                // msg:^Message
+  data.emitted_lt = {};
+  data.metadata = {};
+  switch (get_tag(cs)) {
+    case 4:
+      return cs.fetch_ulong(4) == 4                                      // msg_envelope#4
+             && t_IntermediateAddress.fetch_regular(cs, data.cur_addr)   // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.fetch_regular(cs, data.next_addr)  // next_addr:IntermediateAddress
+             && t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining)   // fwd_fee_remaining:Grams
+             && cs.fetch_ref_to(data.msg);                               // msg:^Message
+    case 5: {
+      bool with_metadata, with_emitted_lt;
+      return cs.fetch_ulong(4) == 5                                      // msg_envelope_v2#5
+             && t_IntermediateAddress.fetch_regular(cs, data.cur_addr)   // cur_addr:IntermediateAddress
+             && t_IntermediateAddress.fetch_regular(cs, data.next_addr)  // next_addr:IntermediateAddress
+             && t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining)   // fwd_fee_remaining:Grams
+             && cs.fetch_ref_to(data.msg)                                // msg:^Message
+             && cs.fetch_bool_to(with_emitted_lt) &&
+             (!with_emitted_lt || cs.fetch_uint_to(64, data.emitted_lt.value_force()))  // emitted_lt:(Maybe uint64)
+             && cs.fetch_bool_to(with_metadata) &&
+             (!with_metadata || data.metadata.value_force().unpack(cs));  // metadata:(Maybe MsgMetadata)
+    }
+    default:
+      return false;
+  }
 }
 
-bool MsgEnvelope::unpack_std(vm::CellSlice& cs, int& cur_a, int& nhop_a, Ref<vm::Cell>& msg) const {
-  return cs.fetch_ulong(4) == 4                               // msg_envelope#4
-         && t_IntermediateAddress.fetch_regular(cs, cur_a)    // cur_addr:IntermediateAddress
-         && t_IntermediateAddress.fetch_regular(cs, nhop_a)   // next_addr:IntermediateAddress
-         && cs.fetch_ref_to(msg);
+bool MsgEnvelope::pack(vm::CellBuilder& cb, const Record_std& data) const {
+  bool v2 = (bool)data.metadata || (bool)data.emitted_lt;
+  if (!(cb.store_long_bool(v2 ? 5 : 4, 4) &&                       // msg_envelope#4 / msg_envelope_v2#5
+        cb.store_long_bool(data.cur_addr, 8) &&                    // cur_addr:IntermediateAddress
+        cb.store_long_bool(data.next_addr, 8) &&                   // next_addr:IntermediateAddress
+        t_Grams.store_integer_ref(cb, data.fwd_fee_remaining) &&   // fwd_fee_remaining:Grams
+        cb.store_ref_bool(data.msg))) {                            // msg:^Message
+    return false;
+  }
+  if (v2) {
+    if (!(cb.store_bool_bool((bool)data.emitted_lt) &&
+          (!data.emitted_lt || cb.store_long_bool(data.emitted_lt.value(), 64)))) {  // emitted_lt:(Maybe uint64)
+      return false;
+    }
+    if (!(cb.store_bool_bool((bool)data.metadata) &&
+          (!data.metadata || data.metadata.value().pack(cb)))) {  // metadata:(Maybe MsgMetadata)
+      return false;
+    }
+  }
+  return true;
 }
 
-bool MsgEnvelope::get_created_lt(const vm::CellSlice& cs, unsigned long long& created_lt) const {
+bool MsgEnvelope::pack_cell(td::Ref<vm::Cell>& cell, const Record_std& data) const {
+  vm::CellBuilder cb;
+  return pack(cb, data) && cb.finalize_to(cell);
+}
+
+bool MsgEnvelope::get_emitted_lt(const vm::CellSlice& cs, unsigned long long& emitted_lt) const {
+  // Emitted lt is emitted_lt from MsgEnvelope (if present), otherwise created_lt
   if (!cs.size_refs()) {
     return false;
   }
+  if (get_tag(cs) == 5) {
+    vm::CellSlice cs2 = cs;
+    // msg_envelope_v2#5 cur_addr:IntermediateAddress
+    // next_addr:IntermediateAddress fwd_fee_remaining:Grams
+    // msg:^(Message Any) emitted_lt:(Maybe uint64) ...
+    bool have_emitted_lt;
+    if (!(cs2.skip_first(4) && t_IntermediateAddress.skip(cs2) && t_IntermediateAddress.skip(cs2) &&
+          t_Grams.skip(cs2) && t_Ref_Message.skip(cs2) && cs2.fetch_bool_to(have_emitted_lt))) {
+      return false;
+    }
+    if (have_emitted_lt) {
+      return cs2.fetch_ulong_bool(64, emitted_lt);
+    }
+  }
   auto msg_cs = load_cell_slice(cs.prefetch_ref());
-  return t_Message.get_created_lt(msg_cs, created_lt);
+  return t_Message.get_created_lt(msg_cs, emitted_lt);
 }
 
 const MsgEnvelope t_MsgEnvelope;
@@ -1692,6 +1785,15 @@ bool InMsg::skip(vm::CellSlice& cs) const {
              && cs.advance(64)       // transaction_id:uint64
              && t_Grams.skip(cs)     // fwd_fee:Grams
              && t_RefCell.skip(cs);  // proof_delivered:^Cell
+    case msg_import_deferred_fin:
+      return cs.advance(5)                    // msg_import_deferred_fin$00100
+             && t_Ref_MsgEnvelope.skip(cs)    // in_msg:^MsgEnvelope
+             && t_Ref_Transaction.skip(cs)    // transaction:^Transaction
+             && t_Grams.skip(cs);             // fwd_fee:Grams
+    case msg_import_deferred_tr:
+      return cs.advance(5)                    // msg_import_deferred_tr$00101
+             && t_Ref_MsgEnvelope.skip(cs)    // in_msg:^MsgEnvelope
+             && t_Ref_MsgEnvelope.skip(cs);   // out_msg:^MsgEnvelope
   }
   return false;
 }
@@ -1734,12 +1836,22 @@ bool InMsg::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
              && cs.advance(64)                           // transaction_id:uint64
             && t_Grams.validate_skip(ops, cs, weak)      // fwd_fee:Grams
             && t_RefCell.validate_skip(ops, cs, weak);   // proof_delivered:^Cell
+    case msg_import_deferred_fin:
+      return cs.advance(5)                                       // msg_import_deferred_fin$00100
+             && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak)   // in_msg:^MsgEnvelope
+             && t_Ref_Transaction.validate_skip(ops, cs, weak)   // transaction:^Transaction
+             && t_Grams.validate_skip(ops, cs, weak);            // fwd_fee:Grams
+    case msg_import_deferred_tr:
+      return cs.advance(5)                                       // msg_import_deferred_tr$00101
+             && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak)   // in_msg:^MsgEnvelope
+             && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak);  // out_msg:^MsgEnvelope
   }
   return false;
 }
 
 bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const {
-  switch (get_tag(cs)) {
+  int tag = get_tag(cs);
+  switch (tag) {
     case msg_import_ext:  // inbound external message
       return t_ImportFees.null_value(cb);  // external messages have no value and no import fees
     case msg_import_ihr:  // IHR-forwarded internal message to its final destination
@@ -1765,8 +1877,9 @@ bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const {
                && t_CurrencyCollection.null_value(cb);  // value_imported := 0
       }
       return false;
     case msg_import_fin:  // internal message delivered to its final destination in this block
-      if (cs.advance(3) && cs.size_refs() >= 2) {
+    case msg_import_deferred_fin:  // internal message from DispatchQueue to its final destination in this block
+      if (cs.advance(tag == msg_import_fin ? 3 : 5) && cs.size_refs() >= 2) {
         auto msg_env_cs = load_cell_slice(cs.fetch_ref());
         MsgEnvelope::Record in_msg;
         td::RefInt256 fwd_fee, fwd_fee_remaining, value_grams, ihr_fee;
@@ -1787,13 +1900,14 @@ bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const {
                              msg_info.value.write());  // value_imported = msg.value + msg.ihr_fee + fwd_fee_remaining
       }
       return false;
     case msg_import_tr:  // transit internal message
-      if (cs.advance(3) && cs.size_refs() >= 2) {
+    case msg_import_deferred_tr:  // internal message from DispatchQueue to OutMsgQueue
+      if (cs.advance(tag == msg_import_tr ? 3 : 5) && cs.size_refs() >= 2) {
         auto msg_env_cs = load_cell_slice(cs.fetch_ref());
         MsgEnvelope::Record in_msg;
-        td::RefInt256 transit_fee, fwd_fee_remaining, value_grams, ihr_fee;
+        td::RefInt256 transit_fee = td::zero_refint(), fwd_fee_remaining, value_grams, ihr_fee;
         if (!(t_MsgEnvelope.unpack(msg_env_cs, in_msg) && cs.fetch_ref().not_null() &&
-              t_Grams.as_integer_skip_to(cs, transit_fee) &&
+              (tag == msg_import_deferred_tr || t_Grams.as_integer_skip_to(cs, transit_fee)) &&
               (fwd_fee_remaining = t_Grams.as_integer(in_msg.fwd_fee_remaining)).not_null() &&
               cmp(transit_fee, fwd_fee_remaining) <= 0)) {
           return false;
@@ -1871,6 +1985,14 @@ bool OutMsg::skip(vm::CellSlice& cs) const {
       return cs.advance(3)                   // msg_export_tr_req$111
              && t_Ref_MsgEnvelope.skip(cs)   // out_msg:^MsgEnvelope
             && RefTo<InMsg>{}.skip(cs);      // imported:^InMsg
+    case msg_export_new_defer:
+      return cs.advance(5)                   // msg_export_new_defer$10100
+             && t_Ref_MsgEnvelope.skip(cs)   // out_msg:^MsgEnvelope
+             && t_Ref_Transaction.skip(cs);  // transaction:^Transaction
+    case msg_export_deferred_tr:
+      return cs.advance(5)                   // msg_export_deferred_tr$10101
+             && t_Ref_MsgEnvelope.skip(cs)   // out_msg:^MsgEnvelope
+             && RefTo<InMsg>{}.skip(cs);     // imported:^InMsg
   }
   return false;
 }
@@ -1910,12 +2032,21 @@ bool OutMsg::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
       return cs.advance(3)                                      // msg_export_tr_req$111
              && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak)  // out_msg:^MsgEnvelope
             && RefTo<InMsg>{}.validate_skip(ops, cs, weak);     // imported:^InMsg
+    case msg_export_new_defer:
+      return cs.advance(5)                                       // msg_export_new_defer$10100
+             && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak)   // out_msg:^MsgEnvelope
+             && t_Ref_Transaction.validate_skip(ops, cs, weak);  // transaction:^Transaction
+    case msg_export_deferred_tr:
+      return cs.advance(5)                                       // msg_export_deferred_tr$10101
+             && t_Ref_MsgEnvelope.validate_skip(ops, cs, weak)   // out_msg:^MsgEnvelope
+             && RefTo<InMsg>{}.validate_skip(ops, cs, weak);     // imported:^InMsg
   }
   return false;
 }
 
 bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const {
-  switch (get_tag(cs)) {
+  auto tag = get_tag(cs);
+  switch (tag) {
     case msg_export_ext:  // external outbound message carries no value
       if (cs.have(3, 2)) {
         return t_CurrencyCollection.null_value(cb);
@@ -1929,10 +2060,13 @@ bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const {
       return cs.have(4 + 63, 1) && t_CurrencyCollection.null_value(cb);
     case msg_export_deq_short:  // dequeueing record for outbound message, no exported value
       return cs.have(4 + 256 + 32 + 64 + 64) && t_CurrencyCollection.null_value(cb);
     case msg_export_new:     // newly-generated outbound internal message, queued
     case msg_export_tr:      // transit internal message, queued
     case msg_export_tr_req:  // transit internal message, re-queued from this shardchain
-      if (cs.advance(3) && cs.size_refs() >= 2) {
+    case msg_export_new_defer:    // newly-generated outbound internal message, deferred
+    case msg_export_deferred_tr:  // internal message from DispatchQueue, queued
+      int tag_len = (tag == msg_export_new_defer || tag == msg_export_deferred_tr) ? 5 : 3;
+      if (cs.advance(tag_len) && cs.size_refs() >= 2) {
         auto msg_env_cs = load_cell_slice(cs.fetch_ref());
         MsgEnvelope::Record out_msg;
         if (!(cs.fetch_ref().not_null() && t_MsgEnvelope.unpack(msg_env_cs, out_msg))) {
@@ -1954,12 +2088,12 @@ bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const {
   return false;
 }
 
-bool OutMsg::get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) const {
+bool OutMsg::get_emitted_lt(vm::CellSlice& cs, unsigned long long& emitted_lt) const {
   switch (get_tag(cs)) {
     case msg_export_ext:
       if (cs.have(3, 1)) {
         auto msg_cs = load_cell_slice(cs.prefetch_ref());
-        return t_Message.get_created_lt(msg_cs, created_lt);
+        return t_Message.get_created_lt(msg_cs, emitted_lt);
       } else {
         return false;
       }
@@ -1970,9 +2104,11 @@ bool OutMsg::get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) c
     case msg_export_deq_short:
     case msg_export_deq_imm:
     case msg_export_tr_req:
+    case msg_export_new_defer:
+    case msg_export_deferred_tr:
       if (cs.have(3, 1)) {
         auto out_msg_cs = load_cell_slice(cs.prefetch_ref());
-        return t_MsgEnvelope.get_created_lt(out_msg_cs, created_lt);
+        return t_MsgEnvelope.get_emitted_lt(out_msg_cs, emitted_lt);
       } else {
         return false;
       }
@@ -2003,26 +2139,53 @@ bool Aug_OutMsgQueue::eval_empty(vm::CellBuilder& cb) const {
 
 bool Aug_OutMsgQueue::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const {
   Ref<vm::Cell> msg_env;
-  unsigned long long created_lt;
-  return cs.fetch_ref_to(msg_env) && t_MsgEnvelope.get_created_lt(load_cell_slice(std::move(msg_env)), created_lt) &&
-         cb.store_ulong_rchk_bool(created_lt, 64);
+  unsigned long long emitted_lt;
+  return cs.fetch_ref_to(msg_env) && t_MsgEnvelope.get_emitted_lt(load_cell_slice(std::move(msg_env)), emitted_lt) &&
+         cb.store_ulong_rchk_bool(emitted_lt, 64);
+}
+
+bool Aug_DispatchQueue::eval_fork(vm::CellBuilder& cb, vm::CellSlice& left_cs, vm::CellSlice& right_cs) const {
+  unsigned long long x, y;
+  return left_cs.fetch_ulong_bool(64, x) && right_cs.fetch_ulong_bool(64, y) &&
+         cb.store_ulong_rchk_bool(std::min(x, y), 64);
+}
+
+bool Aug_DispatchQueue::eval_empty(vm::CellBuilder& cb) const {
+  return cb.store_long_bool(0, 64);
+}
+
+bool Aug_DispatchQueue::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const {
+  Ref<vm::Cell> messages_root;
+  if (!cs.fetch_maybe_ref(messages_root)) {
+    return false;
+  }
+  vm::Dictionary messages{std::move(messages_root), 64};
+  td::BitArray<64> key_buffer;
+  td::uint64 key;
+  if (messages.get_minmax_key(key_buffer.bits(), 64).is_null()) {
+    key = (td::uint64)-1;
+  } else {
+    key = key_buffer.to_ulong();
+  }
+  return cb.store_long_bool(key, 64);
 }
 
 const Aug_OutMsgQueue aug_OutMsgQueue;
+const Aug_DispatchQueue aug_DispatchQueue;
 const OutMsgQueue t_OutMsgQueue;
 
 const ProcessedUpto t_ProcessedUpto;
 const HashmapE t_ProcessedInfo{96, t_ProcessedUpto};
 const HashmapE t_IhrPendingInfo{256, t_uint128};
 
-// _ out_queue:OutMsgQueue proc_info:ProcessedInfo = OutMsgQueueInfo;
+// _ out_queue:OutMsgQueue proc_info:ProcessedInfo extra:(Maybe OutMsgQueueExtra) = OutMsgQueueInfo;
 bool OutMsgQueueInfo::skip(vm::CellSlice& cs) const {
-  return t_OutMsgQueue.skip(cs) && t_ProcessedInfo.skip(cs) && t_IhrPendingInfo.skip(cs);
+  return t_OutMsgQueue.skip(cs) && t_ProcessedInfo.skip(cs) && Maybe<gen::OutMsgQueueExtra>().skip(cs);
 }
 
 bool OutMsgQueueInfo::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
   return t_OutMsgQueue.validate_skip(ops, cs, weak) && t_ProcessedInfo.validate_skip(ops, cs, weak) &&
-         t_IhrPendingInfo.validate_skip(ops, cs, weak);
+         Maybe<gen::OutMsgQueueExtra>().validate_skip(ops, cs, weak);
 }
 
 const OutMsgQueueInfo t_OutMsgQueueInfo;

@@ -28,6 +28,7 @@
 #include "td/utils/bits.h"
 #include "td/utils/StringBuilder.h"
 #include "ton/ton-types.h"
+#include "block-auto.h"
 
 namespace block {
 
@@ -469,11 +470,17 @@ struct MsgEnvelope final : TLB_Complex {
     int cur_addr, next_addr;
     td::RefInt256 fwd_fee_remaining;
     Ref<vm::Cell> msg;
+    td::optional<ton::LogicalTime> emitted_lt;
+    td::optional<MsgMetadata> metadata;
   };
   bool unpack(vm::CellSlice& cs, Record& data) const;
   bool unpack(vm::CellSlice& cs, Record_std& data) const;
-  bool unpack_std(vm::CellSlice& cs, int& cur_a, int& nhop_a, Ref<vm::Cell>& msg) const;
-  bool get_created_lt(const vm::CellSlice& cs, unsigned long long& created_lt) const;
+  bool pack(vm::CellBuilder& cb, const Record_std& data) const;
+  bool pack_cell(td::Ref<vm::Cell>& cell, const Record_std& data) const;
+  bool get_emitted_lt(const vm::CellSlice& cs, unsigned long long& emitted_lt) const;
+  int get_tag(const vm::CellSlice& cs) const override {
+    return (int)cs.prefetch_ulong(4);
+  }
 };
 
 extern const MsgEnvelope t_MsgEnvelope;
@@ -801,12 +808,18 @@ struct InMsg final : TLB_Complex {
     msg_import_fin = 4,
     msg_import_tr = 5,
     msg_discard_fin = 6,
-    msg_discard_tr = 7
+    msg_discard_tr = 7,
+    msg_import_deferred_fin = 8,
+    msg_import_deferred_tr = 9
   };
   bool skip(vm::CellSlice& cs) const override;
   bool validate_skip(int* ops, vm::CellSlice& cs, bool weak = false) const override;
   int get_tag(const vm::CellSlice& cs) const override {
-    return (int)cs.prefetch_ulong(3);
+    int tag = (int)cs.prefetch_ulong(3);
+    if (tag != 1) {
+      return tag;
+    }
+    return (int)cs.prefetch_ulong(5) - 0b00100 + 8;
   }
   bool get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const;
 };
@@ -822,16 +835,24 @@ struct OutMsg final : TLB_Complex {
     msg_export_deq_imm = 4,
     msg_export_deq = 12,
     msg_export_deq_short = 13,
-    msg_export_tr_req = 7
+    msg_export_tr_req = 7,
+    msg_export_new_defer = 20,   // 0b10100
+    msg_export_deferred_tr = 21  // 0b10101
  };
  bool skip(vm::CellSlice& cs) const override;
  bool validate_skip(int* ops, vm::CellSlice& cs, bool weak = false) const override;
  int get_tag(const vm::CellSlice& cs) const override {
    int t = (int)cs.prefetch_ulong(3);
-   return t != 6 ? t : (int)cs.prefetch_ulong(4);
+   if (t == 6) {
+     return (int)cs.prefetch_ulong(4);
+   }
+   if (t == 5) {
+     return (int)cs.prefetch_ulong(5);
+   }
+   return t;
  }
  bool get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const;
- bool get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) const;
+ bool get_emitted_lt(vm::CellSlice& cs, unsigned long long& emitted_lt) const;
 };
 
 extern const OutMsg t_OutMsg;
@@ -909,6 +930,16 @@ struct Aug_OutMsgQueue final : AugmentationCheckData {
 
 extern const Aug_OutMsgQueue aug_OutMsgQueue;
 
+struct Aug_DispatchQueue final : AugmentationCheckData {
+  Aug_DispatchQueue() : AugmentationCheckData(gen::t_AccountDispatchQueue, t_uint64) {
+  }
+  bool eval_fork(vm::CellBuilder& cb, vm::CellSlice& left_cs, vm::CellSlice& right_cs) const override;
+  bool eval_empty(vm::CellBuilder& cb) const override;
+  bool eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const override;
+};
+
+extern const Aug_DispatchQueue aug_DispatchQueue;
+
 struct OutMsgQueue final : TLB_Complex {
   HashmapAugE dict_type;
   OutMsgQueue() : dict_type(32 + 64 + 256, aug_OutMsgQueue){};

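A hedged sketch of how the declarations above compose (illustrative only; the wrapper function, its parameters, and the zero values for cur_addr/next_addr/fwd_fee_remaining are assumptions, not part of the commit):

// Sketch: build a msg_envelope_v2#5 with metadata and serialize it via the new pack_cell().
// `msg_root`, `emitted_lt` and `metadata` are caller-supplied placeholders.
td::Ref<vm::Cell> make_envelope_v2(td::Ref<vm::Cell> msg_root, ton::LogicalTime emitted_lt,
                                   const block::MsgMetadata& metadata) {
  block::tlb::MsgEnvelope::Record_std env;
  env.cur_addr = 0;   // IntermediateAddress encodings, assumed already computed by the caller
  env.next_addr = 0;
  env.fwd_fee_remaining = td::make_refint(0);
  env.msg = std::move(msg_root);
  env.emitted_lt = emitted_lt;  // setting this (or metadata) makes pack() pick tag #5
  env.metadata = metadata;
  td::Ref<vm::Cell> env_cell;
  return block::tlb::t_MsgEnvelope.pack_cell(env_cell, env) ? env_cell : td::Ref<vm::Cell>{};
}
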
@@ -28,6 +28,7 @@
 #include "td/utils/tl_storers.h"
 #include "td/utils/misc.h"
 #include "td/utils/Random.h"
+#include "vm/fmt.hpp"
 
 namespace block {
 using namespace std::literals::string_literals;
@@ -642,7 +643,11 @@ bool EnqueuedMsgDescr::unpack(vm::CellSlice& cs) {
   }
   cur_prefix_ = interpolate_addr(src_prefix_, dest_prefix_, env.cur_addr);
   next_prefix_ = interpolate_addr(src_prefix_, dest_prefix_, env.next_addr);
-  lt_ = info.created_lt;
+  unsigned long long lt;
+  if (!tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(enq.out_msg), lt)) {
+    return invalidate();
+  }
+  lt_ = lt;
   enqueued_lt_ = enq.enqueued_lt;
   hash_ = env.msg->get_hash().bits();
   msg_ = std::move(env.msg);
@@ -858,12 +863,20 @@ td::Status ShardState::unpack_out_msg_queue_info(Ref<vm::Cell> out_msg_queue_inf
     return td::Status::Error(
         -666, "ProcessedInfo in the state of "s + id_.to_str() + " is invalid according to automated validity checks");
   }
-  if (!block::gen::t_IhrPendingInfo.validate_csr(1024, qinfo.ihr_pending)) {
-    return td::Status::Error(
-        -666, "IhrPendingInfo in the state of "s + id_.to_str() + " is invalid according to automated validity checks");
-  }
   processed_upto_ = block::MsgProcessedUptoCollection::unpack(ton::ShardIdFull(id_), std::move(qinfo.proc_info));
-  ihr_pending_ = std::make_unique<vm::Dictionary>(std::move(qinfo.ihr_pending), 320);
+  ihr_pending_ = std::make_unique<vm::Dictionary>(320);
+  if (qinfo.extra.write().fetch_long(1)) {
+    block::gen::OutMsgQueueExtra::Record extra;
+    if (!block::tlb::csr_unpack(qinfo.extra, extra)) {
+      return td::Status::Error(-666, "cannot unpack OutMsgQueueExtre in the state of "s + id_.to_str());
+    }
+    dispatch_queue_ = std::make_unique<vm::AugmentedDictionary>(extra.dispatch_queue, 256, tlb::aug_DispatchQueue);
+    if (extra.out_queue_size.write().fetch_long(1)) {
+      out_msg_queue_size_ = extra.out_queue_size->prefetch_ulong(48);
+    }
+  } else {
+    dispatch_queue_ = std::make_unique<vm::AugmentedDictionary>(256, tlb::aug_DispatchQueue);
+  }
   auto shard1 = id_.shard_full();
   td::BitArray<64> pfx{(long long)shard1.shard};
   int pfx_len = shard_prefix_length(shard1);
@@ -994,6 +1007,17 @@ td::Status ShardState::merge_with(ShardState& sib) {
   underload_history_ = overload_history_ = 0;
   // 10. compute vert_seqno
   vert_seqno_ = std::max(vert_seqno_, sib.vert_seqno_);
+  // 11. merge dispatch_queue (same as account dict)
+  if (!dispatch_queue_->combine_with(*sib.dispatch_queue_)) {
+    return td::Status::Error(-666, "cannot merge dispatch queues of the two ancestors");
+  }
+  sib.dispatch_queue_.reset();
+  // 11. merge out_msg_queue_size
+  if (out_msg_queue_size_ && sib.out_msg_queue_size_) {
+    out_msg_queue_size_.value() += sib.out_msg_queue_size_.value();
+  } else {
+    out_msg_queue_size_ = {};
+  }
   // Anything else? add here
   // ...
 
@@ -1009,8 +1033,8 @@ td::Status ShardState::merge_with(ShardState& sib) {
   return td::Status::OK();
 }
 
-td::Result<std::unique_ptr<vm::AugmentedDictionary>> ShardState::compute_split_out_msg_queue(ton::ShardIdFull subshard,
-                                                                                             td::uint32* queue_size) {
+td::Result<std::unique_ptr<vm::AugmentedDictionary>> ShardState::compute_split_out_msg_queue(
+    ton::ShardIdFull subshard) {
   auto shard = id_.shard_full();
   if (!ton::shard_is_parent(shard, subshard)) {
     return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() +
@@ -1018,7 +1042,7 @@ td::Result<std::unique_ptr<vm::AugmentedDictionary>> ShardState::compute_split_o
   }
   CHECK(out_msg_queue_);
   auto subqueue = std::make_unique<vm::AugmentedDictionary>(*out_msg_queue_);
-  int res = block::filter_out_msg_queue(*subqueue, shard, subshard, queue_size);
+  int res = block::filter_out_msg_queue(*subqueue, shard, subshard);
   if (res < 0) {
     return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str());
   }
@@ -1040,7 +1064,7 @@ td::Result<std::shared_ptr<block::MsgProcessedUptoCollection>> ShardState::compu
   return std::move(sub_processed_upto);
 }
 
-td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) {
+td::Status ShardState::split(ton::ShardIdFull subshard) {
   if (!ton::shard_is_parent(id_.shard_full(), subshard)) {
     return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() +
                                        " because it is not a parent");
@@ -1058,10 +1082,12 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size)
   auto shard1 = id_.shard_full();
   CHECK(ton::shard_is_parent(shard1, subshard));
   CHECK(out_msg_queue_);
-  int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, queue_size);
+  td::uint64 queue_size;
+  int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, &queue_size);
   if (res1 < 0) {
     return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str());
   }
+  out_msg_queue_size_ = queue_size;
   LOG(DEBUG) << "split counters: " << res1;
   // 3. processed_upto
   LOG(DEBUG) << "splitting ProcessedUpto";
@@ -1091,6 +1117,11 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size)
   // NB: if total_fees_extra will be allowed to be non-empty, split it here too
   // 7. reset overload/underload history
   overload_history_ = underload_history_ = 0;
+  // 8. split dispatch_queue (same as account dict)
+  LOG(DEBUG) << "splitting dispatch_queue";
+  CHECK(dispatch_queue_);
+  CHECK(dispatch_queue_->cut_prefix_subdict(pfx.bits(), pfx_len));
+  CHECK(dispatch_queue_->has_common_prefix(pfx.bits(), pfx_len));
   // 999. anything else?
   id_.id.shard = subshard.shard;
   id_.file_hash.set_zero();
@@ -1099,7 +1130,7 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size)
 }
 
 int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard,
-                         td::uint32* queue_size) {
+                         td::uint64* queue_size) {
   if (queue_size) {
     *queue_size = 0;
   }
@@ -1390,7 +1421,7 @@ bool ValueFlow::store(vm::CellBuilder& cb) const {
          && exported.store(cb2)                      // exported:CurrencyCollection
          && cb.store_ref_bool(cb2.finalize())        // ]
          && fees_collected.store(cb)                 // fees_collected:CurrencyCollection
          && (burned.is_zero() || burned.store(cb))   // fees_burned:CurrencyCollection
          && fees_imported.store(cb2)                 // ^[ fees_imported:CurrencyCollection
          && recovered.store(cb2)                     // recovered:CurrencyCollection
          && created.store(cb2)                       // created:CurrencyCollection
@@ -1419,8 +1450,7 @@ bool ValueFlow::fetch(vm::CellSlice& cs) {
       from_prev_blk.validate_unpack(std::move(f2.r1.from_prev_blk)) &&
       to_next_blk.validate_unpack(std::move(f2.r1.to_next_blk)) &&
       imported.validate_unpack(std::move(f2.r1.imported)) && exported.validate_unpack(std::move(f2.r1.exported)) &&
-      fees_collected.validate_unpack(std::move(f2.fees_collected)) &&
-      burned.validate_unpack(std::move(f2.burned)) &&
+      fees_collected.validate_unpack(std::move(f2.fees_collected)) && burned.validate_unpack(std::move(f2.burned)) &&
       fees_imported.validate_unpack(std::move(f2.r2.fees_imported)) &&
       recovered.validate_unpack(std::move(f2.r2.recovered)) && created.validate_unpack(std::move(f2.r2.created)) &&
       minted.validate_unpack(std::move(f2.r2.minted))) {
@@ -2305,4 +2335,132 @@ bool parse_block_id_ext(td::Slice str, ton::BlockIdExt& blkid) {
   return parse_block_id_ext(str.begin(), str.end(), blkid);
 }
 
+bool unpack_account_dispatch_queue(Ref<vm::CellSlice> csr, vm::Dictionary& dict, td::uint64& dict_size) {
+  if (csr.not_null()) {
+    block::gen::AccountDispatchQueue::Record rec;
+    if (!block::tlb::csr_unpack(std::move(csr), rec)) {
+      return false;
+    }
+    dict = vm::Dictionary{rec.messages, 64};
+    dict_size = rec.count;
+    if (dict_size == 0 || dict.is_empty()) {
+      return false;
+    }
+  } else {
+    dict = vm::Dictionary{64};
+    dict_size = 0;
+  }
+  return true;
+}
+
+Ref<vm::CellSlice> pack_account_dispatch_queue(const vm::Dictionary& dict, td::uint64 dict_size) {
+  if (dict_size == 0) {
+    return {};
+  }
+  // _ messages:(HashmapE 64 EnqueuedMsg) count:uint48 = AccountDispatchQueue;
+  vm::CellBuilder cb;
+  CHECK(dict.append_dict_to_bool(cb));
+  cb.store_long(dict_size, 48);
+  return cb.as_cellslice_ref();
+}
+
+Ref<vm::CellSlice> get_dispatch_queue_min_lt_account(const vm::AugmentedDictionary& dispatch_queue,
+                                                     ton::StdSmcAddress& addr) {
+  // TODO: This can be done more effectively
+  vm::AugmentedDictionary queue{dispatch_queue.get_root(), 256, tlb::aug_DispatchQueue};
+  if (queue.is_empty()) {
+    return {};
+  }
+  auto root_extra = queue.get_root_extra();
+  if (root_extra.is_null()) {
+    return {};
+  }
+  ton::LogicalTime min_lt = root_extra->prefetch_long(64);
+  while (true) {
+    td::Bits256 key;
+    int pfx_len = queue.get_common_prefix(key.bits(), 256);
+    if (pfx_len < 0) {
+      return {};
+    }
+    if (pfx_len == 256) {
+      addr = key;
+      return queue.lookup(key);
+    }
+    key[pfx_len] = false;
+    vm::AugmentedDictionary queue_cut{queue.get_root(), 256, tlb::aug_DispatchQueue};
+    if (!queue_cut.cut_prefix_subdict(key.bits(), pfx_len + 1)) {
+      return {};
+    }
+    root_extra = queue_cut.get_root_extra();
+    if (root_extra.is_null()) {
+      return {};
+    }
+    ton::LogicalTime cut_min_lt = root_extra->prefetch_long(64);
+    if (cut_min_lt != min_lt) {
+      key[pfx_len] = true;
+    }
+    if (!queue.cut_prefix_subdict(key.bits(), pfx_len + 1)) {
+      return {};
+    }
+  }
+}
+
+bool remove_dispatch_queue_entry(vm::AugmentedDictionary& dispatch_queue, const ton::StdSmcAddress& addr,
+                                 ton::LogicalTime lt) {
+  auto account_dispatch_queue = dispatch_queue.lookup(addr);
+  if (account_dispatch_queue.is_null()) {
+    return false;
+  }
+  vm::Dictionary dict{64};
+  td::uint64 dict_size;
+  if (!unpack_account_dispatch_queue(std::move(account_dispatch_queue), dict, dict_size)) {
+    return false;
+  }
+  td::BitArray<64> key;
+  key.store_ulong(lt);
+  auto entry = dict.lookup_delete(key);
+  if (entry.is_null()) {
+    return false;
+  }
+  --dict_size;
+  account_dispatch_queue = pack_account_dispatch_queue(dict, dict_size);
+  if (account_dispatch_queue.not_null()) {
+    dispatch_queue.set(addr, account_dispatch_queue);
+  } else {
+    dispatch_queue.lookup_delete(addr);
+  }
+  return true;
+}
+
+bool MsgMetadata::unpack(vm::CellSlice& cs) {
+  // msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata;
+  int tag;
+  return cs.fetch_int_to(4, tag) && tag == 0 && cs.fetch_uint_to(32, depth) &&
+         cs.prefetch_ulong(3) == 0b100 &&  // std address, no anycast
+         tlb::t_MsgAddressInt.extract_std_address(cs, initiator_wc, initiator_addr) &&
+         cs.fetch_uint_to(64, initiator_lt);
+}
+
+bool MsgMetadata::pack(vm::CellBuilder& cb) const {
+  // msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata;
+  return cb.store_long_bool(0, 4) && cb.store_long_bool(depth, 32) &&
+         tlb::t_MsgAddressInt.store_std_address(cb, initiator_wc, initiator_addr) &&
+         cb.store_long_bool(initiator_lt, 64);
+}
+
+std::string MsgMetadata::to_str() const {
+  return PSTRING() << "[ depth=" << depth << " init=" << initiator_wc << ":" << initiator_addr.to_hex() << ":"
+                   << initiator_lt << " ]";
+}
+
+bool MsgMetadata::operator==(const MsgMetadata& other) const {
+  return depth == other.depth && initiator_wc == other.initiator_wc && initiator_addr == other.initiator_addr &&
+         initiator_lt == other.initiator_lt;
+}
+
+bool MsgMetadata::operator!=(const MsgMetadata& other) const {
+  return !(*this == other);
+}
+
 }  // namespace block

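As a usage illustration (not part of the commit), the dispatch-queue helpers defined above can be combined to pop the earliest deferred message of the account with the smallest minimal LT; every call below appears in the diff, but the wrapper itself is a sketch:

// Sketch only: pop the earliest deferred message from the per-account dispatch queue.
// Assumes `dispatch_queue` is the 256-bit AugmentedDictionary kept in ShardState.
bool pop_earliest_deferred_message(vm::AugmentedDictionary& dispatch_queue) {
  ton::StdSmcAddress addr;
  auto account_queue = block::get_dispatch_queue_min_lt_account(dispatch_queue, addr);
  if (account_queue.is_null()) {
    return false;  // the whole dispatch queue is empty
  }
  vm::Dictionary messages{64};
  td::uint64 count;
  if (!block::unpack_account_dispatch_queue(std::move(account_queue), messages, count)) {
    return false;
  }
  td::BitArray<64> key;  // AccountDispatchQueue keys are created_lt values
  if (messages.get_minmax_key(key.bits(), 64).is_null()) {
    return false;
  }
  // ... process the EnqueuedMsg stored under this lt ...
  return block::remove_dispatch_queue_entry(dispatch_queue, addr, key.to_ulong());
}
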
@@ -417,6 +417,8 @@ struct ShardState {
   std::unique_ptr<vm::Dictionary> ihr_pending_;
   std::unique_ptr<vm::Dictionary> block_create_stats_;
   std::shared_ptr<block::MsgProcessedUptoCollection> processed_upto_;
+  std::unique_ptr<vm::AugmentedDictionary> dispatch_queue_;
+  td::optional<td::uint64> out_msg_queue_size_;
 
   bool is_valid() const {
     return id_.is_valid();
@@ -433,11 +435,10 @@ struct ShardState {
                        ton::BlockSeqno prev_mc_block_seqno, bool after_split, bool clear_history,
                        std::function<bool(ton::BlockSeqno)> for_each_mcseqno);
   td::Status merge_with(ShardState& sib);
-  td::Result<std::unique_ptr<vm::AugmentedDictionary>> compute_split_out_msg_queue(ton::ShardIdFull subshard,
-                                                                                   td::uint32* queue_size = nullptr);
+  td::Result<std::unique_ptr<vm::AugmentedDictionary>> compute_split_out_msg_queue(ton::ShardIdFull subshard);
   td::Result<std::shared_ptr<block::MsgProcessedUptoCollection>> compute_split_processed_upto(
       ton::ShardIdFull subshard);
-  td::Status split(ton::ShardIdFull subshard, td::uint32* queue_size = nullptr);
+  td::Status split(ton::ShardIdFull subshard);
   td::Status unpack_out_msg_queue_info(Ref<vm::Cell> out_msg_queue_info);
   bool clear_load_history() {
     overload_history_ = underload_history_ = 0;
@@ -658,7 +659,7 @@ class MtCarloComputeShare {
 };
 
 int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard,
-                         td::uint32* queue_size = nullptr);
+                         td::uint64* queue_size = nullptr);
 
 std::ostream& operator<<(std::ostream& os, const ShardId& shard_id);
 
@@ -749,4 +750,25 @@ bool parse_hex_hash(td::Slice str, td::Bits256& hash);
 bool parse_block_id_ext(const char* str, const char* end, ton::BlockIdExt& blkid);
 bool parse_block_id_ext(td::Slice str, ton::BlockIdExt& blkid);
 
+bool unpack_account_dispatch_queue(Ref<vm::CellSlice> csr, vm::Dictionary& dict, td::uint64& dict_size);
+Ref<vm::CellSlice> pack_account_dispatch_queue(const vm::Dictionary& dict, td::uint64 dict_size);
+Ref<vm::CellSlice> get_dispatch_queue_min_lt_account(const vm::AugmentedDictionary& dispatch_queue,
+                                                     ton::StdSmcAddress& addr);
+bool remove_dispatch_queue_entry(vm::AugmentedDictionary& dispatch_queue, const ton::StdSmcAddress& addr,
+                                 ton::LogicalTime lt);
+
+struct MsgMetadata {
+  td::uint32 depth;
+  ton::WorkchainId initiator_wc;
+  ton::StdSmcAddress initiator_addr;
+  ton::LogicalTime initiator_lt;
+
+  bool unpack(vm::CellSlice& cs);
+  bool pack(vm::CellBuilder& cb) const;
+  std::string to_str() const;
+
+  bool operator==(const MsgMetadata& other) const;
+  bool operator!=(const MsgMetadata& other) const;
+};
+
 }  // namespace block

@@ -172,6 +172,12 @@ interm_addr_ext$11 workchain_id:int32 addr_pfx:uint64
 msg_envelope#4 cur_addr:IntermediateAddress
   next_addr:IntermediateAddress fwd_fee_remaining:Grams
   msg:^(Message Any) = MsgEnvelope;
+msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata;
+msg_envelope_v2#5 cur_addr:IntermediateAddress
+  next_addr:IntermediateAddress fwd_fee_remaining:Grams
+  msg:^(Message Any)
+  emitted_lt:(Maybe uint64)
+  metadata:(Maybe MsgMetadata) = MsgEnvelope;
 //
 msg_import_ext$000 msg:^(Message Any) transaction:^Transaction
               = InMsg;
@@ -187,6 +193,9 @@ msg_discard_fin$110 in_msg:^MsgEnvelope transaction_id:uint64
   fwd_fee:Grams = InMsg;
 msg_discard_tr$111 in_msg:^MsgEnvelope transaction_id:uint64
   fwd_fee:Grams proof_delivered:^Cell = InMsg;
+msg_import_deferred_fin$00100 in_msg:^MsgEnvelope
+    transaction:^Transaction fwd_fee:Grams = InMsg;
+msg_import_deferred_tr$00101 in_msg:^MsgEnvelope out_msg:^MsgEnvelope = InMsg;
 //
 import_fees$_ fees_collected:Grams
   value_imported:CurrencyCollection = ImportFees;
@@ -210,6 +219,10 @@ msg_export_tr_req$111 out_msg:^MsgEnvelope
   imported:^InMsg = OutMsg;
 msg_export_deq_imm$100 out_msg:^MsgEnvelope
   reimport:^InMsg = OutMsg;
+msg_export_new_defer$10100 out_msg:^MsgEnvelope
+  transaction:^Transaction = OutMsg;
+msg_export_deferred_tr$10101 out_msg:^MsgEnvelope
+  imported:^InMsg = OutMsg;
 
 _ enqueued_lt:uint64 out_msg:^MsgEnvelope = EnqueuedMsg;
 
@@ -224,8 +237,15 @@ _ (HashmapE 96 ProcessedUpto) = ProcessedInfo;
 ihr_pending$_ import_lt:uint64 = IhrPendingSince;
 _ (HashmapE 320 IhrPendingSince) = IhrPendingInfo;
 
+// key - created_lt
+_ messages:(HashmapE 64 EnqueuedMsg) count:uint48 = AccountDispatchQueue;
+// key - sender address, aug - min created_lt
+_ (HashmapAugE 256 AccountDispatchQueue uint64) = DispatchQueue;
+
+out_msg_queue_extra#0 dispatch_queue:DispatchQueue out_queue_size:(Maybe uint48) = OutMsgQueueExtra;
+
 _ out_queue:OutMsgQueue proc_info:ProcessedInfo
-  ihr_pending:IhrPendingInfo = OutMsgQueueInfo;
+  extra:(Maybe OutMsgQueueExtra) = OutMsgQueueInfo;
 //
 storage_used$_ cells:(VarUInteger 7) bits:(VarUInteger 7)
   public_cells:(VarUInteger 7) = StorageUsed;

@@ -3560,7 +3560,7 @@ LtCellRef Transaction::extract_out_msg(unsigned i) {
  * @returns A triple of the logical time, the extracted output message and the transaction root.
  */
 NewOutMsg Transaction::extract_out_msg_ext(unsigned i) {
-  return {start_lt + i + 1, std::move(out_msgs.at(i)), root};
+  return {start_lt + i + 1, std::move(out_msgs.at(i)), root, i};
 }
 
 /**

@@ -66,8 +66,11 @@ struct NewOutMsg {
  ton::LogicalTime lt;
  Ref<vm::Cell> msg;
  Ref<vm::Cell> trans;
-  NewOutMsg(ton::LogicalTime _lt, Ref<vm::Cell> _msg, Ref<vm::Cell> _trans)
-      : lt(_lt), msg(std::move(_msg)), trans(std::move(_trans)) {
+  unsigned msg_idx;
+  td::optional<MsgMetadata> metadata;
+  td::Ref<vm::Cell> msg_env_from_dispatch_queue;  // Not null if from dispatch queue; in this case lt is emitted_lt
+  NewOutMsg(ton::LogicalTime _lt, Ref<vm::Cell> _msg, Ref<vm::Cell> _trans, unsigned _msg_idx)
+      : lt(_lt), msg(std::move(_msg)), trans(std::move(_trans)), msg_idx(_msg_idx) {
  }
  bool operator<(const NewOutMsg& other) const& {
    return lt < other.lt || (lt == other.lt && msg->get_hash() < other.msg->get_hash());
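NewOutMsg keeps its ordering by logical time, which is what the collator's priority queue relies on; the new msg_idx records a message's position inside its transaction so the first output of a transaction can be told apart from later ones. A small standalone sketch of that ordering, with a toy struct standing in for the real one (the hash field here only mimics the tie-breaking role of msg->get_hash()):

    #include <cstdint>
    #include <queue>
    #include <vector>

    struct SimpleOutMsg {
      std::uint64_t lt;
      std::uint64_t hash;  // tie-breaker, like the message hash in the real struct
      bool operator>(const SimpleOutMsg& other) const {
        return lt > other.lt || (lt == other.lt && hash > other.hash);
      }
    };

    int main() {
      std::priority_queue<SimpleOutMsg, std::vector<SimpleOutMsg>, std::greater<SimpleOutMsg>> new_msgs;
      // A transaction with start_lt = 100 emits its i-th output message at lt = 100 + i + 1,
      // matching extract_out_msg_ext() above.
      for (unsigned i = 0; i < 3; ++i) {
        new_msgs.push({100 + i + 1, i});
      }
      while (!new_msgs.empty()) {
        new_msgs.pop();  // messages come out in creation order: lt 101, 102, 103
      }
    }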
@@ -949,8 +949,8 @@ bool TestNode::show_help(std::string command) {
         "lasttrans[dump] <account-id> <trans-lt> <trans-hash> [<count>]\tShows or dumps specified transaction and "
         "several preceding "
         "ones\n"
-        "listblocktrans[rev] <block-id-ext> <count> [<start-account-id> <start-trans-lt>]\tLists block transactions, "
-        "starting immediately after or before the specified one\n"
+        "listblocktrans[rev][meta] <block-id-ext> <count> [<start-account-id> <start-trans-lt>]\tLists block "
+        "transactions, starting immediately after or before the specified one\n"
         "blkproofchain[step] <from-block-id-ext> [<to-block-id-ext>]\tDownloads and checks proof of validity of the "
         "second "
         "indicated block (or the last known masterchain block) starting from given block\n"

@@ -1074,6 +1074,13 @@ bool TestNode::do_parse_line() {
    return parse_block_id_ext(blkid) && parse_uint32(count) &&
           (seekeoln() || (parse_hash(hash) && parse_lt(lt) && (mode |= 128) && seekeoln())) &&
           get_block_transactions(blkid, mode, count, hash, lt);
+  } else if (word == "listblocktransmeta" || word == "listblocktransrevmeta") {
+    lt = 0;
+    int mode = (word == "listblocktransmeta" ? 7 : 0x47);
+    mode |= 256;
+    return parse_block_id_ext(blkid) && parse_uint32(count) &&
+           (seekeoln() || (parse_hash(hash) && parse_lt(lt) && (mode |= 128) && seekeoln())) &&
+           get_block_transactions(blkid, mode, count, hash, lt);
  } else if (word == "blkproofchain" || word == "blkproofchainstep") {
    ton::BlockIdExt blkid2{};
    return parse_block_id_ext(blkid) && (seekeoln() || parse_block_id_ext(blkid2)) && seekeoln() &&
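In an interactive lite-client session the new variants are used like the existing ones: for example, listblocktransmeta <block-id-ext> 20 would list up to 20 transactions of the given block and print the per-transaction message metadata (initiator and depth) where the server supplies it; the count value here is only an illustrative placeholder, not something introduced by this change.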
@@ -2493,23 +2500,40 @@ bool TestNode::get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned
  } else {
    auto f = F.move_as_ok();
    std::vector<TransId> transactions;
+   std::vector<ton::tl_object_ptr<ton::lite_api::liteServer_transactionMetadata>> metadata;
    for (auto& id : f->ids_) {
      transactions.emplace_back(id->account_, id->lt_, id->hash_);
+     metadata.push_back(std::move(id->metadata_));
    }
    td::actor::send_closure_later(Self, &TestNode::got_block_transactions, ton::create_block_id(f->id_), mode,
-                                  f->req_count_, f->incomplete_, std::move(transactions), std::move(f->proof_));
+                                  f->req_count_, f->incomplete_, std::move(transactions), std::move(metadata),
+                                  std::move(f->proof_));
  }
});
}

-void TestNode::got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete,
-                                      std::vector<TestNode::TransId> trans, td::BufferSlice proof) {
+void TestNode::got_block_transactions(
+    ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete, std::vector<TestNode::TransId> trans,
+    std::vector<ton::tl_object_ptr<ton::lite_api::liteServer_transactionMetadata>> metadata, td::BufferSlice proof) {
  LOG(INFO) << "got up to " << req_count << " transactions from block " << blkid.to_str();
  auto out = td::TerminalIO::out();
  int count = 0;
-  for (auto& t : trans) {
+  for (size_t i = 0; i < trans.size(); ++i) {
+    auto& t = trans[i];
    out << "transaction #" << ++count << ": account " << t.acc_addr.to_hex() << " lt " << t.trans_lt << " hash "
        << t.trans_hash.to_hex() << std::endl;
+    if (mode & 256) {
+      auto& meta = metadata.at(i);
+      if (meta == nullptr) {
+        out << " metadata: <none>" << std::endl;
+      } else {
+        out << " metadata: "
+            << block::MsgMetadata{(td::uint32)meta->depth_, meta->initiator_->workchain_, meta->initiator_->id_,
+                                  (ton::LogicalTime)meta->initiator_lt_}
+                   .to_str()
+            << std::endl;
+      }
+    }
  }
  out << (incomplete ? "(block transaction list incomplete)" : "(end of block transaction list)") << std::endl;
}
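Metadata is gated on a single mode bit on both sides: liteServer.transactionId declares the field as metadata:mode.8?..., and the client requests it by setting the same bit (256) in the query mode, as in the listblocktrans[rev]meta handling above. A trivial standalone check of that flag arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      std::uint32_t mode = 7;   // account + lt + hash, as listblocktransmeta starts with
      mode |= 256;              // request per-transaction metadata (bit 8, i.e. mode.8)
      assert((mode >> 8) & 1);  // the server fills transactionId.metadata only when this bit is set
    }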
@@ -258,7 +258,9 @@ class TestNode : public td::actor::Actor {
  bool get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned count, ton::Bits256 acc_addr,
                              ton::LogicalTime lt);
  void got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete,
-                              std::vector<TransId> trans, td::BufferSlice proof);
+                              std::vector<TransId> trans,
+                              std::vector<ton::tl_object_ptr<ton::lite_api::liteServer_transactionMetadata>> metadata,
+                              td::BufferSlice proof);
  bool get_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode);
  void got_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode, td::BufferSlice res);
  bool get_creator_stats(ton::BlockIdExt blkid, int mode, unsigned req_count, ton::Bits256 start_after,
@@ -66,6 +66,12 @@ class optional {
    DCHECK(*this);
    return impl_.ok_ref();
  }
+  T &value_force() {
+    if (!*this) {
+      *this = T();
+    }
+    return value();
+  }
  T &operator*() {
    return value();
  }

@@ -88,6 +94,14 @@ class optional {
    impl_.emplace(std::forward<ArgsT>(args)...);
  }
+
+  bool operator==(const optional& other) const {
+    return (bool)*this == (bool)other && (!(bool)*this || value() == other.value());
+  }
+
+  bool operator!=(const optional& other) const {
+    return !(*this == other);
+  }
+
 private:
  Result<T> impl_;
};
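value_force() is a "default-construct on first use" accessor. The same semantics expressed with std::optional, which here only stands in for td::optional to keep the sketch self-contained:

    #include <cassert>
    #include <optional>
    #include <vector>

    // If the optional is empty, default-construct the value in place, then return a reference.
    template <class T>
    T& value_force(std::optional<T>& opt) {
      if (!opt) {
        opt.emplace();
      }
      return *opt;
    }

    int main() {
      std::optional<std::vector<int>> maybe_list;
      value_force(maybe_list).push_back(42);  // creates the vector on first use
      assert(maybe_list.has_value() && maybe_list->size() == 1);
    }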
@@ -159,6 +159,7 @@ std::string lite_query_name_by_id(int id) {
      {lite_api::liteServer_getLibrariesWithProof::ID, "getLibrariesWithProof"},
      {lite_api::liteServer_getShardBlockProof::ID, "getShardBlockProof"},
      {lite_api::liteServer_getOutMsgQueueSizes::ID, "getOutMsgQueueSizes"},
+     {lite_api::liteServer_getBlockOutMsgQueueSize::ID, "getBlockOutMsgQueueSize"},
      {lite_api::liteServer_nonfinal_getCandidate::ID, "nonfinal.getCandidate"},
      {lite_api::liteServer_nonfinal_getValidatorGroups::ID, "nonfinal.getValidatorGroups"}};
  auto it = names.find(id);
@@ -41,7 +41,8 @@ liteServer.shardInfo id:tonNode.blockIdExt shardblk:tonNode.blockIdExt shard_pro
liteServer.allShardsInfo id:tonNode.blockIdExt proof:bytes data:bytes = liteServer.AllShardsInfo;
liteServer.transactionInfo id:tonNode.blockIdExt proof:bytes transaction:bytes = liteServer.TransactionInfo;
liteServer.transactionList ids:(vector tonNode.blockIdExt) transactions:bytes = liteServer.TransactionList;
-liteServer.transactionId mode:# account:mode.0?int256 lt:mode.1?long hash:mode.2?int256 = liteServer.TransactionId;
+liteServer.transactionMetadata mode:# depth:int initiator:liteServer.accountId initiator_lt:long = liteServer.TransactionMetadata;
+liteServer.transactionId#b12f65af mode:# account:mode.0?int256 lt:mode.1?long hash:mode.2?int256 metadata:mode.8?liteServer.transactionMetadata = liteServer.TransactionId;
liteServer.transactionId3 account:int256 lt:long = liteServer.TransactionId3;
liteServer.blockTransactions id:tonNode.blockIdExt req_count:# incomplete:Bool ids:(vector liteServer.transactionId) proof:bytes = liteServer.BlockTransactions;
liteServer.blockTransactionsExt id:tonNode.blockIdExt req_count:# incomplete:Bool transactions:bytes proof:bytes = liteServer.BlockTransactionsExt;

@@ -59,6 +60,7 @@ liteServer.shardBlockProof masterchain_id:tonNode.blockIdExt links:(vector liteS
liteServer.lookupBlockResult id:tonNode.blockIdExt mode:# mc_block_id:tonNode.blockIdExt client_mc_state_proof:bytes mc_block_proof:bytes shard_links:(vector liteServer.shardBlockLink) header:bytes prev_header:bytes = liteServer.LookupBlockResult;
liteServer.outMsgQueueSize id:tonNode.blockIdExt size:int = liteServer.OutMsgQueueSize;
liteServer.outMsgQueueSizes shards:(vector liteServer.outMsgQueueSize) ext_msg_queue_size_limit:int = liteServer.OutMsgQueueSizes;
+liteServer.blockOutMsgQueueSize mode:# id:tonNode.blockIdExt size:long proof:mode.0?bytes = liteServer.BlockOutMsgQueueSize;

liteServer.debug.verbosity value:int = liteServer.debug.Verbosity;

@@ -97,6 +99,7 @@ liteServer.getLibraries library_list:(vector int256) = liteServer.LibraryResult;
liteServer.getLibrariesWithProof id:tonNode.blockIdExt mode:# library_list:(vector int256) = liteServer.LibraryResultWithProof;
liteServer.getShardBlockProof id:tonNode.blockIdExt = liteServer.ShardBlockProof;
liteServer.getOutMsgQueueSizes mode:# wc:mode.0?int shard:mode.0?long = liteServer.OutMsgQueueSizes;
+liteServer.getBlockOutMsgQueueSize mode:# id:tonNode.blockIdExt want_proof:mode.0?true = liteServer.BlockOutMsgQueueSize;

liteServer.nonfinal.getValidatorGroups mode:# wc:mode.0?int shard:mode.0?long = liteServer.nonfinal.ValidatorGroups;
liteServer.nonfinal.getCandidate id:liteServer.nonfinal.candidateId = liteServer.nonfinal.Candidate;

Binary file not shown.

@@ -653,7 +653,7 @@ engine.validator.onePerfTimerStat time:int min:double avg:double max:double = en
engine.validator.perfTimerStatsByName name:string stats:(vector engine.validator.OnePerfTimerStat) = engine.validator.PerfTimerStatsByName;
engine.validator.perfTimerStats stats:(vector engine.validator.PerfTimerStatsByName) = engine.validator.PerfTimerStats;

-engine.validator.shardOutQueueSize size:int = engine.validator.ShardOutQueueSize;
+engine.validator.shardOutQueueSize size:long = engine.validator.ShardOutQueueSize;

---functions---

Binary file not shown.
@ -57,7 +57,10 @@ enum GlobalCapabilities {
|
||||||
capBounceMsgBody = 4,
|
capBounceMsgBody = 4,
|
||||||
capReportVersion = 8,
|
capReportVersion = 8,
|
||||||
capSplitMergeTransactions = 16,
|
capSplitMergeTransactions = 16,
|
||||||
capShortDequeue = 32
|
capShortDequeue = 32,
|
||||||
|
capStoreOutMsgQueueSize = 64,
|
||||||
|
capMsgMetadata = 128,
|
||||||
|
capDeferMessages = 256
|
||||||
};
|
};
|
||||||
|
|
||||||
inline int shard_pfx_len(ShardId shard) {
|
inline int shard_pfx_len(ShardId shard) {
|
||||||
|
|
|
@ -3492,7 +3492,7 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getShardO
|
||||||
if (!dest) {
|
if (!dest) {
|
||||||
td::actor::send_closure(
|
td::actor::send_closure(
|
||||||
manager, &ton::validator::ValidatorManagerInterface::get_out_msg_queue_size, handle->id(),
|
manager, &ton::validator::ValidatorManagerInterface::get_out_msg_queue_size, handle->id(),
|
||||||
[promise = std::move(promise)](td::Result<td::uint32> R) mutable {
|
[promise = std::move(promise)](td::Result<td::uint64> R) mutable {
|
||||||
if (R.is_error()) {
|
if (R.is_error()) {
|
||||||
promise.set_value(create_control_query_error(R.move_as_error_prefix("failed to get queue size: ")));
|
promise.set_value(create_control_query_error(R.move_as_error_prefix("failed to get queue size: ")));
|
||||||
} else {
|
} else {
|
||||||
|
|
|
@ -44,7 +44,8 @@ class Collator final : public td::actor::Actor {
|
||||||
return SUPPORTED_VERSION;
|
return SUPPORTED_VERSION;
|
||||||
}
|
}
|
||||||
static constexpr long long supported_capabilities() {
|
static constexpr long long supported_capabilities() {
|
||||||
return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue;
|
return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue |
|
||||||
|
ton::capStoreOutMsgQueueSize | ton::capMsgMetadata | ton::capDeferMessages;
|
||||||
}
|
}
|
||||||
using LtCellRef = block::LtCellRef;
|
using LtCellRef = block::LtCellRef;
|
||||||
using NewOutMsg = block::NewOutMsg;
|
using NewOutMsg = block::NewOutMsg;
|
||||||
|
@ -192,7 +193,9 @@ class Collator final : public td::actor::Actor {
|
||||||
std::priority_queue<NewOutMsg, std::vector<NewOutMsg>, std::greater<NewOutMsg>> new_msgs;
|
std::priority_queue<NewOutMsg, std::vector<NewOutMsg>, std::greater<NewOutMsg>> new_msgs;
|
||||||
std::pair<ton::LogicalTime, ton::Bits256> last_proc_int_msg_, first_unproc_int_msg_;
|
std::pair<ton::LogicalTime, ton::Bits256> last_proc_int_msg_, first_unproc_int_msg_;
|
||||||
std::unique_ptr<vm::AugmentedDictionary> in_msg_dict, out_msg_dict, out_msg_queue_, sibling_out_msg_queue_;
|
std::unique_ptr<vm::AugmentedDictionary> in_msg_dict, out_msg_dict, out_msg_queue_, sibling_out_msg_queue_;
|
||||||
td::uint32 out_msg_queue_size_ = 0;
|
std::map<StdSmcAddress, size_t> unprocessed_deferred_messages_; // number of messages from dispatch queue in new_msgs
|
||||||
|
td::uint64 out_msg_queue_size_ = 0;
|
||||||
|
bool have_out_msg_queue_size_in_state_ = false;
|
||||||
std::unique_ptr<vm::Dictionary> ihr_pending;
|
std::unique_ptr<vm::Dictionary> ihr_pending;
|
||||||
std::shared_ptr<block::MsgProcessedUptoCollection> processed_upto_, sibling_processed_upto_;
|
std::shared_ptr<block::MsgProcessedUptoCollection> processed_upto_, sibling_processed_upto_;
|
||||||
std::unique_ptr<vm::Dictionary> block_create_stats_;
|
std::unique_ptr<vm::Dictionary> block_create_stats_;
|
||||||
|
@ -203,6 +206,16 @@ class Collator final : public td::actor::Actor {
|
||||||
std::vector<Ref<vm::Cell>> collated_roots_;
|
std::vector<Ref<vm::Cell>> collated_roots_;
|
||||||
std::unique_ptr<ton::BlockCandidate> block_candidate;
|
std::unique_ptr<ton::BlockCandidate> block_candidate;
|
||||||
|
|
||||||
|
std::unique_ptr<vm::AugmentedDictionary> dispatch_queue_;
|
||||||
|
std::map<StdSmcAddress, td::uint32> sender_generated_messages_count_;
|
||||||
|
unsigned dispatch_queue_ops_{0};
|
||||||
|
std::map<StdSmcAddress, LogicalTime> last_dispatch_queue_emitted_lt_;
|
||||||
|
bool have_unprocessed_account_dispatch_queue_ = true;
|
||||||
|
|
||||||
|
bool msg_metadata_enabled_ = false;
|
||||||
|
bool deferring_messages_enabled_ = false;
|
||||||
|
bool store_out_msg_queue_size_ = false;
|
||||||
|
|
||||||
td::PerfWarningTimer perf_timer_;
|
td::PerfWarningTimer perf_timer_;
|
||||||
//
|
//
|
||||||
block::Account* lookup_account(td::ConstBitPtr addr) const;
|
block::Account* lookup_account(td::ConstBitPtr addr) const;
|
||||||
|
@ -231,7 +244,7 @@ class Collator final : public td::actor::Actor {
|
||||||
bool fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton::ShardIdFull& owner);
|
bool fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton::ShardIdFull& owner);
|
||||||
bool fix_processed_upto(block::MsgProcessedUptoCollection& upto);
|
bool fix_processed_upto(block::MsgProcessedUptoCollection& upto);
|
||||||
void got_neighbor_out_queue(int i, td::Result<Ref<MessageQueue>> res);
|
void got_neighbor_out_queue(int i, td::Result<Ref<MessageQueue>> res);
|
||||||
void got_out_queue_size(size_t i, td::Result<td::uint32> res);
|
void got_out_queue_size(size_t i, td::Result<td::uint64> res);
|
||||||
bool adjust_shard_config();
|
bool adjust_shard_config();
|
||||||
bool store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees,
|
bool store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees,
|
||||||
const block::CurrencyCollection& created);
|
const block::CurrencyCollection& created);
|
||||||
|
@ -249,7 +262,8 @@ class Collator final : public td::actor::Actor {
|
||||||
Ref<vm::Cell>& in_msg);
|
Ref<vm::Cell>& in_msg);
|
||||||
bool create_ticktock_transactions(int mask);
|
bool create_ticktock_transactions(int mask);
|
||||||
bool create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask);
|
bool create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask);
|
||||||
Ref<vm::Cell> create_ordinary_transaction(Ref<vm::Cell> msg_root, bool is_special_tx = false);
|
Ref<vm::Cell> create_ordinary_transaction(Ref<vm::Cell> msg_root, td::optional<block::MsgMetadata> msg_metadata,
|
||||||
|
LogicalTime after_lt, bool is_special_tx = false);
|
||||||
bool check_cur_validator_set();
|
bool check_cur_validator_set();
|
||||||
bool unpack_last_mc_state();
|
bool unpack_last_mc_state();
|
||||||
bool unpack_last_state();
|
bool unpack_last_state();
|
||||||
|
@ -278,7 +292,7 @@ class Collator final : public td::actor::Actor {
|
||||||
int priority);
|
int priority);
|
||||||
// td::Result<bool> register_external_message(td::Slice ext_msg_boc);
|
// td::Result<bool> register_external_message(td::Slice ext_msg_boc);
|
||||||
void register_new_msg(block::NewOutMsg msg);
|
void register_new_msg(block::NewOutMsg msg);
|
||||||
void register_new_msgs(block::transaction::Transaction& trans);
|
void register_new_msgs(block::transaction::Transaction& trans, td::optional<block::MsgMetadata> msg_metadata);
|
||||||
bool process_new_messages(bool enqueue_only = false);
|
bool process_new_messages(bool enqueue_only = false);
|
||||||
int process_one_new_message(block::NewOutMsg msg, bool enqueue_only = false, Ref<vm::Cell>* is_special = nullptr);
|
int process_one_new_message(block::NewOutMsg msg, bool enqueue_only = false, Ref<vm::Cell>* is_special = nullptr);
|
||||||
bool process_inbound_internal_messages();
|
bool process_inbound_internal_messages();
|
||||||
|
@ -286,10 +300,15 @@ class Collator final : public td::actor::Actor {
|
||||||
const block::McShardDescr& src_nb);
|
const block::McShardDescr& src_nb);
|
||||||
bool process_inbound_external_messages();
|
bool process_inbound_external_messages();
|
||||||
int process_external_message(Ref<vm::Cell> msg);
|
int process_external_message(Ref<vm::Cell> msg);
|
||||||
bool enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt);
|
bool process_dispatch_queue();
|
||||||
|
bool process_deferred_message(Ref<vm::CellSlice> enq_msg, StdSmcAddress src_addr, LogicalTime lt,
|
||||||
|
td::optional<block::MsgMetadata>& msg_metadata);
|
||||||
|
bool enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, StdSmcAddress src_addr,
|
||||||
|
bool defer = false);
|
||||||
bool enqueue_transit_message(Ref<vm::Cell> msg, Ref<vm::Cell> old_msg_env, ton::AccountIdPrefixFull prev_prefix,
|
bool enqueue_transit_message(Ref<vm::Cell> msg, Ref<vm::Cell> old_msg_env, ton::AccountIdPrefixFull prev_prefix,
|
||||||
ton::AccountIdPrefixFull cur_prefix, ton::AccountIdPrefixFull dest_prefix,
|
ton::AccountIdPrefixFull cur_prefix, ton::AccountIdPrefixFull dest_prefix,
|
||||||
td::RefInt256 fwd_fee_remaining);
|
td::RefInt256 fwd_fee_remaining, td::optional<block::MsgMetadata> msg_metadata,
|
||||||
|
td::optional<LogicalTime> emitted_lt = {});
|
||||||
bool delete_out_msg_queue_msg(td::ConstBitPtr key);
|
bool delete_out_msg_queue_msg(td::ConstBitPtr key);
|
||||||
bool insert_in_msg(Ref<vm::Cell> in_msg);
|
bool insert_in_msg(Ref<vm::Cell> in_msg);
|
||||||
bool insert_out_msg(Ref<vm::Cell> out_msg);
|
bool insert_out_msg(Ref<vm::Cell> out_msg);
|
||||||
|
|
|
@ -1,7 +1,7 @@
|
||||||
/*
|
/*
|
||||||
This file is part of TON Blockchain Library.
|
This file is part of TON Blockchain Library.
|
||||||
|
|
||||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||||
it under the terms of the GNU Lesser General Public License as published by
|
it under the terms of the GNU Lesser General Public License as published by
|
||||||
the Free Software Foundation, either version 2 of the License, or
|
the Free Software Foundation, either version 2 of the License, or
|
||||||
(at your option) any later version.
|
(at your option) any later version.
|
||||||
|
@ -50,6 +50,7 @@ static const td::uint32 SPLIT_MAX_QUEUE_SIZE = 100000;
|
||||||
static const td::uint32 MERGE_MAX_QUEUE_SIZE = 2047;
|
static const td::uint32 MERGE_MAX_QUEUE_SIZE = 2047;
|
||||||
static const td::uint32 SKIP_EXTERNALS_QUEUE_SIZE = 8000;
|
static const td::uint32 SKIP_EXTERNALS_QUEUE_SIZE = 8000;
|
||||||
static const int HIGH_PRIORITY_EXTERNAL = 10; // don't skip high priority externals when queue is big
|
static const int HIGH_PRIORITY_EXTERNAL = 10; // don't skip high priority externals when queue is big
|
||||||
|
static const int DEFER_MESSAGES_AFTER = 10; // 10'th and later messages from address will be deferred
|
||||||
|
|
||||||
#define DBG(__n) dbg(__n)&&
|
#define DBG(__n) dbg(__n)&&
|
||||||
#define DSTART int __dcnt = 0;
|
#define DSTART int __dcnt = 0;
|
||||||
|
@ -694,6 +695,9 @@ bool Collator::unpack_last_mc_state() {
|
||||||
create_stats_enabled_ = config_->create_stats_enabled();
|
create_stats_enabled_ = config_->create_stats_enabled();
|
||||||
report_version_ = config_->has_capability(ton::capReportVersion);
|
report_version_ = config_->has_capability(ton::capReportVersion);
|
||||||
short_dequeue_records_ = config_->has_capability(ton::capShortDequeue);
|
short_dequeue_records_ = config_->has_capability(ton::capShortDequeue);
|
||||||
|
store_out_msg_queue_size_ = config_->has_capability(ton::capStoreOutMsgQueueSize);
|
||||||
|
msg_metadata_enabled_ = config_->has_capability(ton::capMsgMetadata);
|
||||||
|
deferring_messages_enabled_ = config_->has_capability(ton::capDeferMessages);
|
||||||
shard_conf_ = std::make_unique<block::ShardConfig>(*config_);
|
shard_conf_ = std::make_unique<block::ShardConfig>(*config_);
|
||||||
prev_key_block_exists_ = config_->get_last_key_block(prev_key_block_, prev_key_block_lt_);
|
prev_key_block_exists_ = config_->get_last_key_block(prev_key_block_, prev_key_block_lt_);
|
||||||
if (prev_key_block_exists_) {
|
if (prev_key_block_exists_) {
|
||||||
|
@ -794,19 +798,20 @@ bool Collator::request_neighbor_msg_queues() {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Requests the size of the outbound message queue from the previous state(s).
|
* Requests the size of the outbound message queue from the previous state(s) if needed.
|
||||||
*
|
*
|
||||||
* @returns True if the request was successful, false otherwise.
|
* @returns True if the request was successful, false otherwise.
|
||||||
*/
|
*/
|
||||||
bool Collator::request_out_msg_queue_size() {
|
bool Collator::request_out_msg_queue_size() {
|
||||||
if (after_split_) {
|
if (have_out_msg_queue_size_in_state_) {
|
||||||
// If block is after split, the size is calculated during split (see Collator::split_last_state)
|
// if after_split then have_out_msg_queue_size_in_state_ is always true, since the size is calculated during split
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
out_msg_queue_size_ = 0;
|
||||||
for (size_t i = 0; i < prev_blocks.size(); ++i) {
|
for (size_t i = 0; i < prev_blocks.size(); ++i) {
|
||||||
++pending;
|
++pending;
|
||||||
send_closure_later(manager, &ValidatorManager::get_out_msg_queue_size, prev_blocks[i],
|
send_closure_later(manager, &ValidatorManager::get_out_msg_queue_size, prev_blocks[i],
|
||||||
[self = get_self(), i](td::Result<td::uint32> res) {
|
[self = get_self(), i](td::Result<td::uint64> res) {
|
||||||
td::actor::send_closure(std::move(self), &Collator::got_out_queue_size, i, std::move(res));
|
td::actor::send_closure(std::move(self), &Collator::got_out_queue_size, i, std::move(res));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
@ -885,14 +890,14 @@ void Collator::got_neighbor_out_queue(int i, td::Result<Ref<MessageQueue>> res)
|
||||||
* @param i The index of the previous block (0 or 1).
|
* @param i The index of the previous block (0 or 1).
|
||||||
* @param res The result object containing the size of the queue.
|
* @param res The result object containing the size of the queue.
|
||||||
*/
|
*/
|
||||||
void Collator::got_out_queue_size(size_t i, td::Result<td::uint32> res) {
|
void Collator::got_out_queue_size(size_t i, td::Result<td::uint64> res) {
|
||||||
--pending;
|
--pending;
|
||||||
if (res.is_error()) {
|
if (res.is_error()) {
|
||||||
fatal_error(
|
fatal_error(
|
||||||
res.move_as_error_prefix(PSTRING() << "failed to get message queue size from prev block #" << i << ": "));
|
res.move_as_error_prefix(PSTRING() << "failed to get message queue size from prev block #" << i << ": "));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
td::uint32 size = res.move_as_ok();
|
td::uint64 size = res.move_as_ok();
|
||||||
LOG(WARNING) << "got outbound queue size from prev block #" << i << ": " << size;
|
LOG(WARNING) << "got outbound queue size from prev block #" << i << ": " << size;
|
||||||
out_msg_queue_size_ += size;
|
out_msg_queue_size_ += size;
|
||||||
check_pending();
|
check_pending();
|
||||||
|
@ -1016,7 +1021,7 @@ bool Collator::split_last_state(block::ShardState& ss) {
|
||||||
return fatal_error(res2.move_as_error());
|
return fatal_error(res2.move_as_error());
|
||||||
}
|
}
|
||||||
sibling_processed_upto_ = res2.move_as_ok();
|
sibling_processed_upto_ = res2.move_as_ok();
|
||||||
auto res3 = ss.split(shard_, &out_msg_queue_size_);
|
auto res3 = ss.split(shard_);
|
||||||
if (res3.is_error()) {
|
if (res3.is_error()) {
|
||||||
return fatal_error(std::move(res3));
|
return fatal_error(std::move(res3));
|
||||||
}
|
}
|
||||||
|
@ -1052,7 +1057,12 @@ bool Collator::import_shard_state_data(block::ShardState& ss) {
|
||||||
out_msg_queue_ = std::move(ss.out_msg_queue_);
|
out_msg_queue_ = std::move(ss.out_msg_queue_);
|
||||||
processed_upto_ = std::move(ss.processed_upto_);
|
processed_upto_ = std::move(ss.processed_upto_);
|
||||||
ihr_pending = std::move(ss.ihr_pending_);
|
ihr_pending = std::move(ss.ihr_pending_);
|
||||||
|
dispatch_queue_ = std::move(ss.dispatch_queue_);
|
||||||
block_create_stats_ = std::move(ss.block_create_stats_);
|
block_create_stats_ = std::move(ss.block_create_stats_);
|
||||||
|
if (ss.out_msg_queue_size_) {
|
||||||
|
have_out_msg_queue_size_in_state_ = true;
|
||||||
|
out_msg_queue_size_ = ss.out_msg_queue_size_.value();
|
||||||
|
}
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2090,6 +2100,11 @@ bool Collator::do_collate() {
|
||||||
if (!init_value_create()) {
|
if (!init_value_create()) {
|
||||||
return fatal_error("cannot compute the value to be created / minted / recovered");
|
return fatal_error("cannot compute the value to be created / minted / recovered");
|
||||||
}
|
}
|
||||||
|
// 2-. take messages from dispatch queue
|
||||||
|
LOG(INFO) << "process dispatch queue";
|
||||||
|
if (!process_dispatch_queue()) {
|
||||||
|
return fatal_error("cannot process dispatch queue");
|
||||||
|
}
|
||||||
// 2. tick transactions
|
// 2. tick transactions
|
||||||
LOG(INFO) << "create tick transactions";
|
LOG(INFO) << "create tick transactions";
|
||||||
if (!create_ticktock_transactions(2)) {
|
if (!create_ticktock_transactions(2)) {
|
||||||
|
@ -2597,7 +2612,7 @@ bool Collator::create_special_transaction(block::CurrencyCollection amount, Ref<
|
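Note the ordering: draining the dispatch queue is placed ahead of the tick transactions and of ordinary new-message processing, so accounts that had messages deferred in earlier blocks get served early in collation rather than competing with the traffic newly generated in this block.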
||||||
}
|
}
|
||||||
CHECK(block::gen::t_Message_Any.validate_ref(msg));
|
CHECK(block::gen::t_Message_Any.validate_ref(msg));
|
||||||
CHECK(block::tlb::t_Message.validate_ref(msg));
|
CHECK(block::tlb::t_Message.validate_ref(msg));
|
||||||
if (process_one_new_message(block::NewOutMsg{lt, msg, Ref<vm::Cell>{}}, false, &in_msg) != 1) {
|
if (process_one_new_message(block::NewOutMsg{lt, msg, Ref<vm::Cell>{}, 0}, false, &in_msg) != 1) {
|
||||||
return fatal_error("cannot generate special transaction for recovering "s + amount.to_str() + " to account " +
|
return fatal_error("cannot generate special transaction for recovering "s + amount.to_str() + " to account " +
|
||||||
addr.to_hex());
|
addr.to_hex());
|
||||||
}
|
}
|
||||||
|
@ -2639,13 +2654,18 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
req_start_lt = std::max(req_start_lt, start_lt + 1);
|
req_start_lt = std::max(req_start_lt, start_lt + 1);
|
||||||
|
auto it = last_dispatch_queue_emitted_lt_.find(acc->addr);
|
||||||
|
if (it != last_dispatch_queue_emitted_lt_.end()) {
|
||||||
|
req_start_lt = std::max(req_start_lt, it->second + 1);
|
||||||
|
}
|
||||||
if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) {
|
if (acc->last_trans_end_lt_ >= start_lt && acc->transactions.empty()) {
|
||||||
return fatal_error(td::Status::Error(-666, PSTRING()
|
return fatal_error(td::Status::Error(-666, PSTRING()
|
||||||
<< "last transaction time in the state of account " << workchain()
|
<< "last transaction time in the state of account " << workchain()
|
||||||
<< ":" << smc_addr.to_hex() << " is too large"));
|
<< ":" << smc_addr.to_hex() << " is too large"));
|
||||||
}
|
}
|
||||||
std::unique_ptr<block::transaction::Transaction> trans = std::make_unique<block::transaction::Transaction>(
|
std::unique_ptr<block::transaction::Transaction> trans = std::make_unique<block::transaction::Transaction>(
|
||||||
*acc, mask == 2 ? block::transaction::Transaction::tr_tick : block::transaction::Transaction::tr_tock, req_start_lt, now_);
|
*acc, mask == 2 ? block::transaction::Transaction::tr_tick : block::transaction::Transaction::tr_tock,
|
||||||
|
req_start_lt, now_);
|
||||||
if (!trans->prepare_storage_phase(storage_phase_cfg_, true)) {
|
if (!trans->prepare_storage_phase(storage_phase_cfg_, true)) {
|
||||||
return fatal_error(td::Status::Error(
|
return fatal_error(td::Status::Error(
|
||||||
-666, std::string{"cannot create storage phase of a new transaction for smart contract "} + smc_addr.to_hex()));
|
-666, std::string{"cannot create storage phase of a new transaction for smart contract "} + smc_addr.to_hex()));
|
||||||
|
@ -2675,7 +2695,8 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t
|
||||||
td::Status::Error(-666, std::string{"cannot commit new transaction for smart contract "} + smc_addr.to_hex()));
|
td::Status::Error(-666, std::string{"cannot commit new transaction for smart contract "} + smc_addr.to_hex()));
|
||||||
}
|
}
|
||||||
update_max_lt(acc->last_trans_end_lt_);
|
update_max_lt(acc->last_trans_end_lt_);
|
||||||
register_new_msgs(*trans);
|
block::MsgMetadata new_msg_metadata{0, acc->workchain, acc->addr, trans->start_lt};
|
||||||
|
register_new_msgs(*trans, std::move(new_msg_metadata));
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2683,11 +2704,15 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t
 * Creates an ordinary transaction using a given message.
 *
 * @param msg_root The root of the message to be processed serialized using Message TLB-scheme.
+ * @param msg_metadata Metadata of the inbound message.
+ * @param after_lt Transaction lt will be greater than after_lt. Used for deferred messages.
 * @param is_special_tx True if creating a special transaction (mint/recover), false otherwise.
 *
 * @returns The root of the serialized transaction, or an empty reference if the transaction creation fails.
 */
-Ref<vm::Cell> Collator::create_ordinary_transaction(Ref<vm::Cell> msg_root, bool is_special_tx) {
+Ref<vm::Cell> Collator::create_ordinary_transaction(Ref<vm::Cell> msg_root,
+                                                    td::optional<block::MsgMetadata> msg_metadata, LogicalTime after_lt,
+                                                    bool is_special_tx) {
  ton::StdSmcAddress addr;
  auto cs = vm::load_cell_slice(msg_root);
  bool external;

@@ -2731,8 +2756,15 @@ Ref<vm::Cell> Collator::create_ordinary_transaction(Ref<vm::Cell> msg_root, bool
  block::Account* acc = acc_res.move_as_ok();
  assert(acc);

+  if (external) {
+    after_lt = std::max(after_lt, last_proc_int_msg_.first);
+  }
+  auto it = last_dispatch_queue_emitted_lt_.find(acc->addr);
+  if (it != last_dispatch_queue_emitted_lt_.end()) {
+    after_lt = std::max(after_lt, it->second);
+  }
  auto res = impl_create_ordinary_transaction(msg_root, acc, now_, start_lt, &storage_phase_cfg_, &compute_phase_cfg_,
-                                              &action_phase_cfg_, external, last_proc_int_msg_.first);
+                                              &action_phase_cfg_, external, after_lt);
  if (res.is_error()) {
    auto error = res.move_as_error();
    if (error.code() == -701) {
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
|
|
||||||
register_new_msgs(*trans);
|
td::optional<block::MsgMetadata> new_msg_metadata;
|
||||||
|
if (external || is_special_tx) {
|
||||||
|
new_msg_metadata = block::MsgMetadata{0, acc->workchain, acc->addr, trans->start_lt};
|
||||||
|
} else if (msg_metadata) {
|
||||||
|
new_msg_metadata = std::move(msg_metadata);
|
||||||
|
++new_msg_metadata.value().depth;
|
||||||
|
}
|
||||||
|
register_new_msgs(*trans, std::move(new_msg_metadata));
|
||||||
update_max_lt(acc->last_trans_end_lt_);
|
update_max_lt(acc->last_trans_end_lt_);
|
||||||
value_flow_.burned += trans->blackhole_burned;
|
value_flow_.burned += trans->blackhole_burned;
|
||||||
return trans_root;
|
return trans_root;
|
||||||
|
@ -2791,13 +2830,12 @@ td::Result<std::unique_ptr<block::transaction::Transaction>> Collator::impl_crea
|
||||||
<< ":" << acc->addr.to_hex() << " is too large");
|
<< ":" << acc->addr.to_hex() << " is too large");
|
||||||
}
|
}
|
||||||
auto trans_min_lt = lt;
|
auto trans_min_lt = lt;
|
||||||
if (external) {
|
// transactions processing external messages must have lt larger than all processed internal messages
|
||||||
// transactions processing external messages must have lt larger than all processed internal messages
|
// if account has deferred message processed in this block, the next transaction should have lt > emitted_lt
|
||||||
trans_min_lt = std::max(trans_min_lt, after_lt);
|
trans_min_lt = std::max(trans_min_lt, after_lt);
|
||||||
}
|
|
||||||
|
|
||||||
std::unique_ptr<block::transaction::Transaction> trans =
|
std::unique_ptr<block::transaction::Transaction> trans = std::make_unique<block::transaction::Transaction>(
|
||||||
std::make_unique<block::transaction::Transaction>(*acc, block::transaction::Transaction::tr_ord, trans_min_lt + 1, utime, msg_root);
|
*acc, block::transaction::Transaction::tr_ord, trans_min_lt + 1, utime, msg_root);
|
||||||
bool ihr_delivered = false; // FIXME
|
bool ihr_delivered = false; // FIXME
|
||||||
if (!trans->unpack_input_msg(ihr_delivered, action_phase_cfg)) {
|
if (!trans->unpack_input_msg(ihr_delivered, action_phase_cfg)) {
|
||||||
if (external) {
|
if (external) {
|
||||||
|
@ -2948,7 +2986,7 @@ bool Collator::is_our_address(const ton::StdSmcAddress& addr) const {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Processes a message generated in this block.
|
* Processes a message generated in this block or a message from DispatchQueue.
|
||||||
*
|
*
|
||||||
* @param msg The new message to be processed.
|
* @param msg The new message to be processed.
|
||||||
* @param enqueue_only Flag indicating whether the message should only be enqueued.
|
* @param enqueue_only Flag indicating whether the message should only be enqueued.
|
||||||
|
@ -2961,6 +2999,7 @@ bool Collator::is_our_address(const ton::StdSmcAddress& addr) const {
|
||||||
* -1 - error occured.
|
* -1 - error occured.
|
||||||
*/
|
*/
|
||||||
int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, Ref<vm::Cell>* is_special) {
|
int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, Ref<vm::Cell>* is_special) {
|
||||||
|
bool from_dispatch_queue = msg.msg_env_from_dispatch_queue.not_null();
|
||||||
Ref<vm::CellSlice> src, dest;
|
Ref<vm::CellSlice> src, dest;
|
||||||
bool enqueue, external;
|
bool enqueue, external;
|
||||||
auto cs = load_cell_slice(msg.msg);
|
auto cs = load_cell_slice(msg.msg);
|
||||||
|
@ -2972,7 +3011,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
|
||||||
if (!tlb::unpack(cs, info)) {
|
if (!tlb::unpack(cs, info)) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
CHECK(info.created_lt == msg.lt && info.created_at == now_);
|
CHECK(info.created_lt == msg.lt && info.created_at == now_ && !from_dispatch_queue);
|
||||||
src = std::move(info.src);
|
src = std::move(info.src);
|
||||||
enqueue = external = true;
|
enqueue = external = true;
|
||||||
break;
|
break;
|
||||||
|
@ -2982,7 +3021,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
|
||||||
if (!tlb::unpack(cs, info)) {
|
if (!tlb::unpack(cs, info)) {
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
CHECK(info.created_lt == msg.lt && info.created_at == now_);
|
CHECK(from_dispatch_queue || (info.created_lt == msg.lt && info.created_at == now_));
|
||||||
src = std::move(info.src);
|
src = std::move(info.src);
|
||||||
dest = std::move(info.dest);
|
dest = std::move(info.dest);
|
||||||
fwd_fees = block::tlb::t_Grams.as_integer(info.fwd_fee);
|
fwd_fees = block::tlb::t_Grams.as_integer(info.fwd_fee);
|
||||||
|
@ -2994,7 +3033,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
|
||||||
default:
|
default:
|
||||||
return -1;
|
return -1;
|
||||||
}
|
}
|
||||||
CHECK(is_our_address(std::move(src)));
|
CHECK(is_our_address(src));
|
||||||
if (external) {
|
if (external) {
|
||||||
// 1. construct a msg_export_ext OutMsg
|
// 1. construct a msg_export_ext OutMsg
|
||||||
vm::CellBuilder cb;
|
vm::CellBuilder cb;
|
||||||
|
@@ -3006,9 +3045,44 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
    // (if ever a structure in the block for listing all external outbound messages appears, insert this message there as well)
    return 0;
  }
-  if (enqueue) {
-    auto lt = msg.lt;
-    bool ok = enqueue_message(std::move(msg), std::move(fwd_fees), lt);
+  WorkchainId src_wc;
+  StdSmcAddress src_addr;
+  CHECK(block::tlb::t_MsgAddressInt.extract_std_address(src, src_wc, src_addr));
+  CHECK(src_wc == workchain());
+  bool is_special_account = is_masterchain() && config_->is_special_smartcontract(src_addr);
+  bool defer = false;
+  if (!from_dispatch_queue) {
+    if (deferring_messages_enabled_ && !is_special && !is_special_account && msg.msg_idx != 0) {
+      if (++sender_generated_messages_count_[src_addr] >= DEFER_MESSAGES_AFTER) {
+        defer = true;
+      }
+    }
+    if (dispatch_queue_->lookup(src_addr).not_null() || unprocessed_deferred_messages_.count(src_addr)) {
+      defer = true;
+    }
+  } else {
+    auto &x = unprocessed_deferred_messages_[src_addr];
+    CHECK(x > 0);
+    if (--x == 0) {
+      unprocessed_deferred_messages_.erase(src_addr);
+    }
+  }
+
+  if (enqueue || defer) {
+    bool ok;
+    if (from_dispatch_queue) {
+      auto msg_env = msg.msg_env_from_dispatch_queue;
+      block::tlb::MsgEnvelope::Record_std env;
+      CHECK(block::tlb::unpack_cell(msg_env, env));
+      auto src_prefix = block::tlb::MsgAddressInt::get_prefix(src);
+      auto dest_prefix = block::tlb::MsgAddressInt::get_prefix(dest);
+      CHECK(env.emitted_lt && env.emitted_lt.value() == msg.lt);
+      ok = enqueue_transit_message(std::move(msg.msg), std::move(msg_env), src_prefix, src_prefix, dest_prefix,
+                                   std::move(env.fwd_fee_remaining), std::move(env.metadata), msg.lt);
+    } else {
+      ok = enqueue_message(std::move(msg), std::move(fwd_fees), src_addr, defer);
+    }
    return ok ? 0 : -1;
  }
  // process message by a transaction in this block:
@@ -3019,26 +3093,36 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
    return -1;
  }
  // 1. create a Transaction processing this Message
-  auto trans_root = create_ordinary_transaction(msg.msg, is_special != nullptr);
+  auto trans_root = create_ordinary_transaction(msg.msg, msg.metadata, msg.lt, is_special != nullptr);
  if (trans_root.is_null()) {
    fatal_error("cannot create transaction for re-processing output message");
    return -1;
  }
  // 2. create a MsgEnvelope enveloping this Message
-  vm::CellBuilder cb;
-  CHECK(cb.store_long_bool(0x46060, 20)                          // msg_envelope#4 cur_addr:.. next_addr:..
-        && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees)   // fwd_fee_remaining:t_Grams
-        && cb.store_ref_bool(msg.msg));                          // msg:^(Message Any)
-  Ref<vm::Cell> msg_env = cb.finalize();
+  block::tlb::MsgEnvelope::Record_std msg_env_rec{0x60, 0x60, fwd_fees, msg.msg, {}, msg.metadata};
+  Ref<vm::Cell> msg_env;
+  CHECK(block::tlb::pack_cell(msg_env, msg_env_rec));
  if (verbosity > 2) {
    std::cerr << "new (processed outbound) message envelope: ";
    block::gen::t_MsgEnvelope.print_ref(std::cerr, msg_env);
  }
  // 3. create InMsg, referring to this MsgEnvelope and this Transaction
-  CHECK(cb.store_long_bool(3, 3)                                 // msg_import_imm$011
-        && cb.store_ref_bool(msg_env)                            // in_msg:^MsgEnvelope
-        && cb.store_ref_bool(trans_root)                         // transaction:^Transaction
-        && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees)); // fwd_fee:Grams
+  vm::CellBuilder cb;
+  if (from_dispatch_queue) {
+    auto msg_env = msg.msg_env_from_dispatch_queue;
+    block::tlb::MsgEnvelope::Record_std env;
+    CHECK(block::tlb::unpack_cell(msg_env, env));
+    CHECK(env.emitted_lt && env.emitted_lt.value() == msg.lt);
+    CHECK(cb.store_long_bool(0b00100, 5)                                        // msg_import_deferred_fin$00100
+          && cb.store_ref_bool(msg_env)                                         // in_msg:^MsgEnvelope
+          && cb.store_ref_bool(trans_root)                                      // transaction:^Transaction
+          && block::tlb::t_Grams.store_integer_ref(cb, env.fwd_fee_remaining)); // fwd_fee:Grams
+  } else {
+    CHECK(cb.store_long_bool(3, 3)                                 // msg_import_imm$011
+          && cb.store_ref_bool(msg_env)                            // in_msg:^MsgEnvelope
+          && cb.store_ref_bool(trans_root)                         // transaction:^Transaction
+          && block::tlb::t_Grams.store_integer_ref(cb, fwd_fees)); // fwd_fee:Grams
+  }
  // 4. insert InMsg into InMsgDescr
  Ref<vm::Cell> in_msg = cb.finalize();
  if (!insert_in_msg(in_msg)) {

@@ -3049,14 +3133,16 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R
    *is_special = in_msg;
    return 1;
  }
-  // 5. create OutMsg, referring to this MsgEnvelope and InMsg
-  CHECK(cb.store_long_bool(2, 3)        // msg_export_imm$010
-        && cb.store_ref_bool(msg_env)   // out_msg:^MsgEnvelope
-        && cb.store_ref_bool(msg.trans) // transaction:^Transaction
-        && cb.store_ref_bool(in_msg));  // reimport:^InMsg
-  // 6. insert OutMsg into OutMsgDescr
-  if (!insert_out_msg(cb.finalize())) {
-    return -1;
+  if (!from_dispatch_queue) {
+    // 5. create OutMsg, referring to this MsgEnvelope and InMsg
+    CHECK(cb.store_long_bool(2, 3)        // msg_export_imm$010
+          && cb.store_ref_bool(msg_env)   // out_msg:^MsgEnvelope
+          && cb.store_ref_bool(msg.trans) // transaction:^Transaction
+          && cb.store_ref_bool(in_msg));  // reimport:^InMsg
+    // 6. insert OutMsg into OutMsgDescr
+    if (!insert_out_msg(cb.finalize())) {
+      return -1;
+    }
  }
  // 7. check whether the block is full now
  if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) {
|
||||||
* @param cur_prefix The account ID prefix for the next hop.
|
* @param cur_prefix The account ID prefix for the next hop.
|
||||||
* @param dest_prefix The prefix of the destination account ID.
|
* @param dest_prefix The prefix of the destination account ID.
|
||||||
* @param fwd_fee_remaining The remaining forward fee.
|
* @param fwd_fee_remaining The remaining forward fee.
|
||||||
|
* @param msg_metadata Metadata of the message.
|
||||||
|
* @param emitted_lt If present - the message was taken from DispatchQueue, and msg_env will have this emitted_lt.
|
||||||
*
|
*
|
||||||
* @returns True if the transit message is successfully enqueued, false otherwise.
|
* @returns True if the transit message is successfully enqueued, false otherwise.
|
||||||
*/
|
*/
|
||||||
bool Collator::enqueue_transit_message(Ref<vm::Cell> msg, Ref<vm::Cell> old_msg_env,
|
bool Collator::enqueue_transit_message(Ref<vm::Cell> msg, Ref<vm::Cell> old_msg_env,
|
||||||
ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix,
|
ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix,
|
||||||
ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining) {
|
ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining,
|
||||||
LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256);
|
td::optional<block::MsgMetadata> msg_metadata,
|
||||||
                                       td::optional<LogicalTime> emitted_lt) {
  bool from_dispatch_queue = (bool)emitted_lt;
  if (from_dispatch_queue) {
    LOG(DEBUG) << "enqueueing message from dispatch queue " << msg->get_hash().bits().to_hex(256)
               << ", emitted_lt=" << emitted_lt.value();
  } else {
    LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256);
  }
  bool requeue = is_our_address(prev_prefix) && !from_dispatch_queue;
  // 1. perform hypercube routing
  auto route_info = block::perform_hypercube_routing(cur_prefix, dest_prefix, shard_);
  if ((unsigned)route_info.first > 96 || (unsigned)route_info.second > 96) {
    return fatal_error("cannot perform hypercube routing for a transit message");
  }
  // 2. compute our part of transit fees
  td::RefInt256 transit_fee =
      from_dispatch_queue ? td::zero_refint() : action_phase_cfg_.fwd_std.get_next_part(fwd_fee_remaining);
  fwd_fee_remaining -= transit_fee;
  CHECK(td::sgn(transit_fee) >= 0 && td::sgn(fwd_fee_remaining) >= 0);
  // 3. create a new MsgEnvelope
  block::tlb::MsgEnvelope::Record_std msg_env_rec{route_info.first, route_info.second, fwd_fee_remaining, msg,
                                                  emitted_lt, std::move(msg_metadata)};
  Ref<vm::Cell> msg_env;
  CHECK(block::tlb::t_MsgEnvelope.pack_cell(msg_env, msg_env_rec));
  // 4. create InMsg
  vm::CellBuilder cb;
  if (from_dispatch_queue) {
    CHECK(cb.store_long_bool(0b00101, 5)     // msg_import_deferred_tr$00101
          && cb.store_ref_bool(old_msg_env)  // in_msg:^MsgEnvelope
          && cb.store_ref_bool(msg_env));    // out_msg:^MsgEnvelope
  } else {
    CHECK(cb.store_long_bool(5, 3)                                     // msg_import_tr$101
          && cb.store_ref_bool(old_msg_env)                            // in_msg:^MsgEnvelope
          && cb.store_ref_bool(msg_env)                                // out_msg:^MsgEnvelope
          && block::tlb::t_Grams.store_integer_ref(cb, transit_fee));  // transit_fee:Grams
  }
  Ref<vm::Cell> in_msg = cb.finalize();
  // 5. create a new OutMsg
  // msg_export_tr$011 / msg_export_tr_req$111 / msg_export_deferred_tr$10101
  if (from_dispatch_queue) {
    CHECK(cb.store_long_bool(0b10101, 5));
  } else {
    CHECK(cb.store_long_bool(requeue ? 7 : 3, 3));
  }
  CHECK(cb.store_ref_bool(msg_env)      // out_msg:^MsgEnvelope
        && cb.store_ref_bool(in_msg));  // imported:^InMsg
  Ref<vm::Cell> out_msg = cb.finalize();
  // 4.1. insert OutMsg into OutMsgDescr
  if (verbosity > 2) {
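A note on the constructor tags used in this hunk: msg_import_tr$101 is written as a 3-bit tag with value 5, msg_import_deferred_tr$00101 as a 5-bit tag with value 5, and the export tags follow the same pattern. The values and widths below are copied from the store_long_bool calls above; the helper itself is only an illustrative standalone sketch, not part of the codebase.

#include <cstdint>
#include <iostream>
#include <string>

// Render the first `len` bits of `value` as a binary prefix string,
// mirroring how CellBuilder::store_long_bool(value, len) lays them out.
static std::string tag_bits(uint64_t value, int len) {
  std::string s;
  for (int i = len - 1; i >= 0; --i) {
    s += ((value >> i) & 1) ? '1' : '0';
  }
  return s;
}

int main() {
  std::cout << "msg_import_tr          " << tag_bits(5, 3) << "\n";        // 101
  std::cout << "msg_import_deferred_tr " << tag_bits(0b00101, 5) << "\n";  // 00101
  std::cout << "msg_export_tr          " << tag_bits(3, 3) << "\n";        // 011
  std::cout << "msg_export_tr_req      " << tag_bits(7, 3) << "\n";        // 111
  std::cout << "msg_export_deferred_tr " << tag_bits(0b10101, 5) << "\n";  // 10101
}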
@@ -3134,8 +3240,8 @@ bool Collator::enqueue_transit_message(Ref<vm::Cell> msg, Ref<vm::Cell> old_msg_
    return fatal_error("cannot insert a new InMsg into InMsgDescr");
  }
  // 5. create EnqueuedMsg
  CHECK(cb.store_long_bool(from_dispatch_queue ? emitted_lt.value() : start_lt)  // _ enqueued_lt:uint64
        && cb.store_ref_bool(msg_env));  // out_msg:^MsgEnvelope = EnqueuedMsg;
  // 6. insert EnqueuedMsg into OutMsgQueue
  // NB: we use here cur_prefix instead of src_prefix; should we check that route_info.first >= next_addr.use_dest_bits of the old envelope?
  auto next_hop = block::interpolate_addr(cur_prefix, dest_prefix, route_info.second);
@@ -3237,9 +3343,14 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
    LOG(ERROR) << "cannot unpack CommonMsgInfo of an inbound internal message";
    return false;
  }
  if (!env.emitted_lt && info.created_lt != lt) {
    LOG(ERROR) << "inbound internal message has an augmentation value in source OutMsgQueue distinct from the one in "
                  "its contents (CommonMsgInfo)";
    return false;
  }
  if (env.emitted_lt && env.emitted_lt.value() != lt) {
    LOG(ERROR) << "inbound internal message has an augmentation value in source OutMsgQueue distinct from the one in "
                  "its contents (emitted_lt in MsgEnvelope)";
    return false;
  }
  if (!block::tlb::validate_message_libs(env.msg)) {
@@ -3302,7 +3413,8 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
  bool our = ton::shard_contains(shard_, cur_prefix);
  bool to_us = ton::shard_contains(shard_, dest_prefix);
  block::EnqueuedMsgDescr enq_msg_descr{cur_prefix, next_prefix,
                                        env.emitted_lt ? env.emitted_lt.value() : info.created_lt, enqueued_lt,
                                        env.msg->get_hash().bits()};
  if (processed_upto_->already_processed(enq_msg_descr)) {
    LOG(DEBUG) << "inbound internal message with lt=" << enq_msg_descr.lt_ << " hash=" << enq_msg_descr.hash_.to_hex()
@@ -3319,7 +3431,7 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
    // destination is outside our shard, relay transit message
    // (very similar to enqueue_message())
    if (!enqueue_transit_message(std::move(env.msg), std::move(msg_env), cur_prefix, next_prefix, dest_prefix,
                                 std::move(env.fwd_fee_remaining), std::move(env.metadata))) {
      return fatal_error("cannot enqueue transit internal message with key "s + key.to_hex(352));
    }
    return !our || delete_out_msg_queue_msg(key);
@@ -3328,7 +3440,7 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
  // process the message by an ordinary transaction similarly to process_one_new_message()
  //
  // 8. create a Transaction processing this Message
  auto trans_root = create_ordinary_transaction(env.msg, env.metadata, 0);
  if (trans_root.is_null()) {
    return fatal_error("cannot create transaction for processing inbound message");
  }
@@ -3368,6 +3480,9 @@ bool Collator::process_inbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalT
 * @returns True if the processing was successful, false otherwise.
 */
bool Collator::process_inbound_internal_messages() {
  if (have_unprocessed_account_dispatch_queue_) {
    return true;
  }
  while (!block_full_ && !nb_out_msgs_->is_eof()) {
    block_full_ = !block_limit_status_->fits(block::ParamLimits::cl_normal);
    if (block_full_) {
@@ -3476,7 +3591,7 @@ int Collator::process_external_message(Ref<vm::Cell> msg) {
  }
  // process message by a transaction in this block:
  // 1. create a Transaction processing this Message
  auto trans_root = create_ordinary_transaction(msg, /* metadata = */ {}, 0);
  if (trans_root.is_null()) {
    if (busy_) {
      // transaction rejected by account
@@ -3500,6 +3615,222 @@ int Collator::process_external_message(Ref<vm::Cell> msg) {
  return 1;
}

/**
 * Processes messages from dispatch queue
 *
 * Messages from dispatch queue are taken in three steps:
 * 1. Take one message from each account (in the order of lt)
 * 2. Take up to 10 per account (including from p.1), up to 20 per initiator, up to 150 in total
 * 3. Take up to X messages per initiator, up to 150 in total. X depends on out msg queue size
 *
 * @returns True if the processing was successful, false otherwise.
 */
bool Collator::process_dispatch_queue() {
  have_unprocessed_account_dispatch_queue_ = true;
  size_t max_total_count[3] = {1 << 30, 150, 150};
  size_t max_per_initiator[3] = {1 << 30, 20, 0};
  if (out_msg_queue_size_ <= 256) {
    max_per_initiator[2] = 10;
  } else if (out_msg_queue_size_ <= 512) {
    max_per_initiator[2] = 2;
  } else if (out_msg_queue_size_ <= 2048) {
    max_per_initiator[2] = 1;
  }
  for (int iter = 0; iter < 3; ++iter) {
    if (max_per_initiator[iter] == 0) {
      continue;
    }
    vm::AugmentedDictionary cur_dispatch_queue{dispatch_queue_->get_root(), 256, block::tlb::aug_DispatchQueue};
    std::map<std::tuple<WorkchainId, StdSmcAddress, LogicalTime>, size_t> count_per_initiator;
    size_t total_count = 0;
    while (!cur_dispatch_queue.is_empty()) {
      block_full_ = !block_limit_status_->fits(block::ParamLimits::cl_normal);
      if (block_full_) {
        LOG(INFO) << "BLOCK FULL, stop processing dispatch queue";
        return true;
      }
      if (soft_timeout_.is_in_past(td::Timestamp::now())) {
        block_full_ = true;
        LOG(WARNING) << "soft timeout reached, stop processing dispatch queue";
        return true;
      }
      StdSmcAddress src_addr;
      auto account_dispatch_queue = block::get_dispatch_queue_min_lt_account(cur_dispatch_queue, src_addr);
      if (account_dispatch_queue.is_null()) {
        return fatal_error("invalid dispatch queue in shard state");
      }
      vm::Dictionary dict{64};
      td::uint64 dict_size;
      if (!block::unpack_account_dispatch_queue(account_dispatch_queue, dict, dict_size)) {
        return fatal_error(PSTRING() << "invalid account dispatch queue for account " << src_addr.to_hex());
      }
      td::BitArray<64> key;
      Ref<vm::CellSlice> enqueued_msg = dict.extract_minmax_key(key.bits(), 64, false, false);
      LogicalTime lt = key.to_ulong();

      td::optional<block::MsgMetadata> msg_metadata;
      if (!process_deferred_message(std::move(enqueued_msg), src_addr, lt, msg_metadata)) {
        return fatal_error(PSTRING() << "error processing internal message from dispatch queue: account="
                                     << src_addr.to_hex() << ", lt=" << lt);
      }

      // Remove message from DispatchQueue
      bool ok;
      if (iter == 0 || (iter == 1 && sender_generated_messages_count_[src_addr] >= DEFER_MESSAGES_AFTER)) {
        ok = cur_dispatch_queue.lookup_delete(src_addr).not_null();
      } else {
        dict.lookup_delete(key);
        --dict_size;
        account_dispatch_queue = block::pack_account_dispatch_queue(dict, dict_size);
        ok = account_dispatch_queue.not_null() ? cur_dispatch_queue.set(src_addr, account_dispatch_queue)
                                               : cur_dispatch_queue.lookup_delete(src_addr).not_null();
      }
      if (!ok) {
        return fatal_error(PSTRING() << "error processing internal message from dispatch queue: account="
                                     << src_addr.to_hex() << ", lt=" << lt);
      }
      if (msg_metadata) {
        auto initiator = std::make_tuple(msg_metadata.value().initiator_wc, msg_metadata.value().initiator_addr,
                                         msg_metadata.value().initiator_lt);
        size_t initiator_count = ++count_per_initiator[initiator];
        if (initiator_count >= max_per_initiator[iter]) {
          cur_dispatch_queue.lookup_delete(src_addr);
        }
      }
      ++total_count;
      if (total_count >= max_total_count[iter]) {
        break;
      }
    }
    if (iter == 0) {
      have_unprocessed_account_dispatch_queue_ = false;
    }
  }
  return true;
}

/**
 * Processes an internal message from DispatchQueue.
 * The message may create a transaction or be enqueued.
 *
 * Similar to Collator::process_inbound_message.
 *
 * @param enq_msg The internal message serialized using EnqueuedMsg TLB-scheme.
 * @param src_addr 256-bit address of the sender.
 * @param lt The logical time of the message.
 * @param msg_metadata Reference to store msg_metadata
 *
 * @returns True if the message was processed successfully, false otherwise.
 */
bool Collator::process_deferred_message(Ref<vm::CellSlice> enq_msg, StdSmcAddress src_addr, LogicalTime lt,
                                        td::optional<block::MsgMetadata>& msg_metadata) {
  if (!block::remove_dispatch_queue_entry(*dispatch_queue_, src_addr, lt)) {
    return fatal_error(PSTRING() << "failed to delete message from DispatchQueue: address=" << src_addr.to_hex()
                                 << ", lt=" << lt);
  }
  ++dispatch_queue_ops_;
  if (!(dispatch_queue_ops_ & 63)) {
    if (!block_limit_status_->add_proof(dispatch_queue_->get_root_cell())) {
      return false;
    }
  }
  ++sender_generated_messages_count_[src_addr];

  LogicalTime enqueued_lt = 0;
  if (enq_msg.is_null() || enq_msg->size_ext() != 0x10040 || (enqueued_lt = enq_msg->prefetch_ulong(64)) != lt) {
    if (enq_msg.not_null()) {
      block::gen::t_EnqueuedMsg.print(std::cerr, *enq_msg);
    }
    LOG(ERROR) << "internal message in DispatchQueue is not a valid EnqueuedMsg (created lt " << lt << ", enqueued "
               << enqueued_lt << ")";
    return false;
  }
  auto msg_env = enq_msg->prefetch_ref();
  CHECK(msg_env.not_null());
  // 0. check MsgEnvelope
  if (msg_env->get_level() != 0) {
    LOG(ERROR) << "cannot import a message with non-zero level!";
    return false;
  }
  if (!block::gen::t_MsgEnvelope.validate_ref(msg_env)) {
    LOG(ERROR) << "MsgEnvelope from DispatchQueue is invalid according to automated checks";
    return false;
  }
  if (!block::tlb::t_MsgEnvelope.validate_ref(msg_env)) {
    LOG(ERROR) << "MsgEnvelope from DispatchQueue is invalid according to hand-written checks";
    return false;
  }
  // 1. unpack MsgEnvelope
  block::tlb::MsgEnvelope::Record_std env;
  if (!tlb::unpack_cell(msg_env, env)) {
    LOG(ERROR) << "cannot unpack MsgEnvelope from DispatchQueue";
    return false;
  }
  // 2. unpack CommonMsgInfo of the message
  vm::CellSlice cs{vm::NoVmOrd{}, env.msg};
  if (block::gen::t_CommonMsgInfo.get_tag(cs) != block::gen::CommonMsgInfo::int_msg_info) {
    LOG(ERROR) << "internal message from DispatchQueue is not in fact internal!";
    return false;
  }
  block::gen::CommonMsgInfo::Record_int_msg_info info;
  if (!tlb::unpack(cs, info)) {
    LOG(ERROR) << "cannot unpack CommonMsgInfo of an internal message from DispatchQueue";
    return false;
  }
  if (info.created_lt != lt) {
    LOG(ERROR) << "internal message has lt in DispatchQueue distinct from the one in its contents";
    return false;
  }
  if (!block::tlb::validate_message_libs(env.msg)) {
    LOG(ERROR) << "internal message in DispatchQueue has invalid StateInit";
    return false;
  }
  // 2.1. check fwd_fee and fwd_fee_remaining
  td::RefInt256 orig_fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee);
  if (env.fwd_fee_remaining > orig_fwd_fee) {
    LOG(ERROR) << "internal message in DispatchQueue has fwd_fee_remaining=" << td::dec_string(env.fwd_fee_remaining)
               << " larger than original fwd_fee=" << td::dec_string(orig_fwd_fee);
    return false;
  }
  // 3. extract source and destination shards
  auto src_prefix = block::tlb::t_MsgAddressInt.get_prefix(info.src);
  auto dest_prefix = block::tlb::t_MsgAddressInt.get_prefix(info.dest);
  if (!(src_prefix.is_valid() && dest_prefix.is_valid())) {
    LOG(ERROR) << "internal message in DispatchQueue has invalid source or destination address";
    return false;
  }
  // 4. check current and next hop shards
  if (env.cur_addr != 0 || env.next_addr != 0) {
    LOG(ERROR) << "internal message in DispatchQueue is expected to have zero cur_addr and next_addr";
    return false;
  }
  // 5. calculate emitted_lt
  LogicalTime emitted_lt = std::max(start_lt, last_dispatch_queue_emitted_lt_[src_addr]) + 1;
  auto it = accounts.find(src_addr);
  if (it != accounts.end()) {
    emitted_lt = std::max(emitted_lt, it->second->last_trans_end_lt_ + 1);
  }
  last_dispatch_queue_emitted_lt_[src_addr] = emitted_lt;
  update_max_lt(emitted_lt + 1);

  env.emitted_lt = emitted_lt;
  if (!block::tlb::pack_cell(msg_env, env)) {
    return fatal_error("cannot pack msg envelope");
  }

  // 6. create NewOutMsg
  block::NewOutMsg new_msg{emitted_lt, env.msg, {}, 0};
  new_msg.metadata = env.metadata;
  new_msg.msg_env_from_dispatch_queue = msg_env;
  ++unprocessed_deferred_messages_[src_addr];
  LOG(INFO) << "delivering deferred message from account " << src_addr.to_hex() << ", lt=" << lt
            << ", emitted_lt=" << emitted_lt;
  register_new_msg(std::move(new_msg));
  msg_metadata = std::move(env.metadata);
  return true;
}

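Two details of the functions above can be illustrated with a small standalone sketch: the third-pass per-initiator cap that shrinks as the outbound queue grows (thresholds 256/512/2048 and caps 10/2/1 are taken from process_dispatch_queue), and the emitted_lt assignment as a max over three candidates (the numeric values in the example are hypothetical). This is only an illustration under those assumptions, not collator code.

#include <algorithm>
#include <cstdint>
#include <iostream>

// Pass-3 per-initiator cap as a function of the current out msg queue size.
static uint64_t third_pass_cap(uint64_t out_msg_queue_size) {
  if (out_msg_queue_size <= 256) {
    return 10;
  } else if (out_msg_queue_size <= 512) {
    return 2;
  } else if (out_msg_queue_size <= 2048) {
    return 1;
  }
  return 0;  // above 2048 the third pass is skipped entirely
}

int main() {
  for (uint64_t size : {100u, 400u, 1500u, 5000u}) {
    std::cout << "queue=" << size << " -> pass-3 cap " << third_pass_cap(size) << "\n";
  }

  // emitted_lt = max(start_lt, last emitted_lt for this sender) + 1,
  // then bumped above the account's last transaction end lt if needed.
  uint64_t start_lt = 1'000'000;                       // hypothetical block start lt
  uint64_t last_dispatch_queue_emitted_lt = 1'000'004; // hypothetical per-account state
  uint64_t last_trans_end_lt = 1'000'010;              // hypothetical account state

  uint64_t emitted_lt = std::max(start_lt, last_dispatch_queue_emitted_lt) + 1;
  emitted_lt = std::max(emitted_lt, last_trans_end_lt + 1);
  std::cout << "emitted_lt = " << emitted_lt << "\n";  // 1000011
}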
/**
 * Inserts an InMsg into the block's InMsgDescr.
 *
@@ -3517,8 +3848,9 @@ bool Collator::insert_in_msg(Ref<vm::Cell> in_msg) {
    return false;
  }
  Ref<vm::Cell> msg = cs.prefetch_ref();
  int tag = block::gen::t_InMsg.get_tag(cs);
  // msg_import_ext$000 or msg_import_ihr$010 contain (Message Any) directly
  if (!(tag == block::gen::InMsg::msg_import_ext || tag == block::gen::InMsg::msg_import_ihr)) {
    // extract Message Any from MsgEnvelope to compute correct key
    auto cs2 = load_cell_slice(std::move(msg));
    if (!cs2.size_refs()) {
@@ -3599,11 +3931,15 @@ bool Collator::insert_out_msg(Ref<vm::Cell> out_msg, td::ConstBitPtr msg_hash) {
 *
 * @param msg The new outbound message to enqueue.
 * @param fwd_fees_remaining The remaining forward fees for the message.
 * @param src_addr 256-bit address of the sender
 * @param defer Put the message to DispatchQueue
 *
 * @returns True if the message was successfully enqueued, false otherwise.
 */
bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, StdSmcAddress src_addr,
                               bool defer) {
  LogicalTime enqueued_lt = msg.lt;
  CHECK(msg.msg_env_from_dispatch_queue.is_null());
  // 0. unpack src_addr and dest_addr
  block::gen::CommonMsgInfo::Record_int_msg_info info;
  if (!tlb::unpack_cell_inexact(msg.msg, info)) {
@@ -3623,18 +3959,24 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema
    return fatal_error("cannot perform hypercube routing for a new outbound message");
  }
  // 2. create a new MsgEnvelope
  block::tlb::MsgEnvelope::Record_std msg_env_rec{
      defer ? 0 : route_info.first, defer ? 0 : route_info.second, fwd_fees_remaining, msg.msg, {}, msg.metadata};
  Ref<vm::Cell> msg_env;
  CHECK(block::tlb::pack_cell(msg_env, msg_env_rec));

  // 3. create a new OutMsg
  vm::CellBuilder cb;
  Ref<vm::Cell> out_msg;
  if (defer) {
    CHECK(cb.store_long_bool(0b10100, 5)       // msg_export_new_defer$10100
          && cb.store_ref_bool(msg_env)        // out_msg:^MsgEnvelope
          && cb.store_ref_bool(msg.trans));    // transaction:^Transaction
    out_msg = cb.finalize();
  } else {
    CHECK(cb.store_long_bool(1, 3)             // msg_export_new$001
          && cb.store_ref_bool(msg_env)        // out_msg:^MsgEnvelope
          && cb.store_ref_bool(msg.trans));    // transaction:^Transaction
    out_msg = cb.finalize();
  }
  // 4. insert OutMsg into OutMsgDescr
  if (verbosity > 2) {
    std::cerr << "OutMsg for a newly-generated message: ";
@@ -3646,7 +3988,30 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema
  // 5. create EnqueuedMsg
  CHECK(cb.store_long_bool(enqueued_lt)   // _ enqueued_lt:uint64
        && cb.store_ref_bool(msg_env));   // out_msg:^MsgEnvelope = EnqueuedMsg;

  // 6. insert EnqueuedMsg into OutMsgQueue (or DispatchQueue)
  if (defer) {
    LOG(INFO) << "deferring new message from account " << workchain() << ":" << src_addr.to_hex() << ", lt=" << msg.lt;
    vm::Dictionary dispatch_dict{64};
    td::uint64 dispatch_dict_size;
    if (!block::unpack_account_dispatch_queue(dispatch_queue_->lookup(src_addr), dispatch_dict, dispatch_dict_size)) {
      return fatal_error(PSTRING() << "cannot unpack AccountDispatchQueue for account " << src_addr.to_hex());
    }
    td::BitArray<64> key;
    key.store_ulong(msg.lt);
    if (!dispatch_dict.set_builder(key, cb, vm::Dictionary::SetMode::Add)) {
      return fatal_error(PSTRING() << "cannot add message to AccountDispatchQueue for account " << src_addr.to_hex()
                                   << ", lt=" << msg.lt);
    }
    ++dispatch_dict_size;
    dispatch_queue_->set(src_addr, block::pack_account_dispatch_queue(dispatch_dict, dispatch_dict_size));
    ++dispatch_queue_ops_;
    if (!(dispatch_queue_ops_ & 63)) {
      return block_limit_status_->add_proof(dispatch_queue_->get_root_cell());
    }
    return true;
  }

  auto next_hop = block::interpolate_addr(src_prefix, dest_prefix, route_info.second);
  td::BitArray<32 + 64 + 256> key;
  key.bits().store_int(next_hop.workchain, 32);
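A rough standalone model of the defer branch above: an AccountDispatchQueue behaves like a per-account map keyed by message lt plus an explicit size counter, and the insert uses "add" semantics (fail if the lt is already present). The sketch uses an ordinary std::map instead of the actual dictionary and TL-B types, so it is illustrative only.

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct AccountDispatchQueue {
  std::map<uint64_t, std::string> by_lt;  // lt -> serialized EnqueuedMsg (placeholder)
  uint64_t size = 0;

  // Mirrors Dictionary::SetMode::Add: reject an entry whose lt already exists.
  bool add(uint64_t lt, std::string enqueued_msg) {
    auto [it, inserted] = by_lt.emplace(lt, std::move(enqueued_msg));
    if (inserted) {
      ++size;
    }
    return inserted;
  }
};

int main() {
  AccountDispatchQueue q;
  std::cout << q.add(1000, "msg A") << " " << q.add(1001, "msg B") << " "
            << q.add(1000, "dup")  // rejected: lt already present
            << ", size=" << q.size << "\n";  // prints: 1 1 0, size=2
}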
@@ -3680,7 +4045,7 @@ bool Collator::process_new_messages(bool enqueue_only) {
    block::NewOutMsg msg = new_msgs.top();
    new_msgs.pop();
    block_limit_status_->extra_out_msgs--;
    if ((block_full_ || have_unprocessed_account_dispatch_queue_) && !enqueue_only) {
      LOG(INFO) << "BLOCK FULL, enqueue all remaining new messages";
      enqueue_only = true;
    }
@@ -3713,11 +4078,17 @@ void Collator::register_new_msg(block::NewOutMsg new_msg) {
 * Registers new messages that were created in the transaction.
 *
 * @param trans The transaction containing the messages.
 * @param msg_metadata Metadata of the new messages.
 */
void Collator::register_new_msgs(block::transaction::Transaction& trans,
                                 td::optional<block::MsgMetadata> msg_metadata) {
  CHECK(trans.root.not_null());
  for (unsigned i = 0; i < trans.out_msgs.size(); i++) {
    block::NewOutMsg msg = trans.extract_out_msg_ext(i);
    if (msg_metadata_enabled_) {
      msg.metadata = msg_metadata;
    }
    register_new_msg(std::move(msg));
  }
}
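The propagation rule above can be summarised as: when msg_metadata_enabled_ is set, every outbound message of a transaction receives the same MsgMetadata. The sketch below uses a plain stand-in struct instead of block::MsgMetadata and is only an illustration of that rule.

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

struct Metadata {  // stand-in for block::MsgMetadata
  int32_t initiator_wc;
  uint64_t initiator_lt;
};

struct OutMsg {
  uint64_t lt;
  std::optional<Metadata> metadata;
};

// Copy the transaction-level metadata onto each generated message (if enabled).
static void register_new_msgs(std::vector<OutMsg>& out_msgs, std::optional<Metadata> msg_metadata,
                              bool metadata_enabled) {
  for (auto& m : out_msgs) {
    if (metadata_enabled) {
      m.metadata = msg_metadata;
    }
  }
}

int main() {
  std::vector<OutMsg> msgs{{100, {}}, {101, {}}};
  register_new_msgs(msgs, Metadata{0, 42}, true);
  std::cout << msgs[0].metadata->initiator_lt << " " << msgs[1].metadata->initiator_lt << "\n";  // 42 42
}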
@@ -4617,9 +4988,27 @@ bool Collator::compute_out_msg_queue_info(Ref<vm::Cell>& out_msg_queue_info) {
    rt->print_rec(std::cerr);
  }
  vm::CellBuilder cb;
  // out_msg_queue_extra#0 dispatch_queue:DispatchQueue out_queue_size:(Maybe uint48) = OutMsgQueueExtra;
  // ... extra:(Maybe OutMsgQueueExtra)
  if (!dispatch_queue_->is_empty() || store_out_msg_queue_size_) {
    if (!(cb.store_long_bool(1, 1) && cb.store_long_bool(0, 4) && dispatch_queue_->append_dict_to_bool(cb))) {
      return false;
    }
    if (!(cb.store_bool_bool(store_out_msg_queue_size_) &&
          (!store_out_msg_queue_size_ || cb.store_long_bool(out_msg_queue_size_, 48)))) {
      return false;
    }
  } else {
    if (!cb.store_long_bool(0, 1)) {
      return false;
    }
  }
  vm::CellSlice maybe_extra = cb.as_cellslice();
  cb.reset();

  return register_out_msg_queue_op(true) && out_msg_queue_->append_dict_to_bool(cb)  // _ out_queue:OutMsgQueue
         && processed_upto_->pack(cb)                                                 // proc_info:ProcessedInfo
         && cb.append_cellslice_bool(maybe_extra)                                     // extra:(Maybe OutMsgQueueExtra)
         && cb.finalize_to(out_msg_queue_info);
}
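For reference, the bits written into maybe_extra above follow the extra:(Maybe OutMsgQueueExtra) layout: one Maybe bit, the 4-bit out_msg_queue_extra#0 tag, the DispatchQueue dictionary, then a Maybe bit plus an optional 48-bit queue size. A small sketch of just the fixed-width prefix (the dictionary part is omitted; this is illustrative, not the actual cell serializer):

#include <cstdint>
#include <iostream>
#include <string>

// Append `len` bits of `value`, most significant first (as store_long_bool does).
static void append_bits(std::string& out, uint64_t value, int len) {
  for (int i = len - 1; i >= 0; --i) {
    out += ((value >> i) & 1) ? '1' : '0';
  }
}

int main() {
  uint64_t out_msg_queue_size = 12345;
  std::string bits;
  append_bits(bits, 1, 1);   // extra:(Maybe ...) -- present
  append_bits(bits, 0, 4);   // out_msg_queue_extra#0 tag
  // ... dispatch_queue:DispatchQueue dictionary would be appended here ...
  append_bits(bits, 1, 1);   // out_queue_size:(Maybe uint48) -- present
  append_bits(bits, out_msg_queue_size, 48);
  std::cout << bits.size() << " bits, prefix=" << bits.substr(0, 6) << "\n";  // 54 bits, prefix=100001
}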
@@ -287,6 +287,9 @@ void LiteQuery::perform() {
      [&](lite_api::liteServer_getOutMsgQueueSizes& q) {
        this->perform_getOutMsgQueueSizes(q.mode_ & 1 ? ShardIdFull(q.wc_, q.shard_) : td::optional<ShardIdFull>());
      },
      [&](lite_api::liteServer_getBlockOutMsgQueueSize& q) {
        this->perform_getBlockOutMsgQueueSize(q.mode_, create_block_id(q.id_));
      },
      [&](auto& obj) { this->abort_query(td::Status::Error(ErrorCode::protoviolation, "unknown query")); }));
}
@@ -2376,6 +2379,45 @@ void LiteQuery::perform_listBlockTransactions(BlockIdExt blkid, int mode, int co
  request_block_data(blkid);
}

static td::Result<tl_object_ptr<lite_api::liteServer_transactionMetadata>> get_in_msg_metadata(
    const Ref<vm::Cell>& in_msg_descr_root, const Ref<vm::Cell>& trans_root) {
  vm::AugmentedDictionary in_msg_descr{vm::load_cell_slice_ref(in_msg_descr_root), 256, block::tlb::aug_InMsgDescr};
  block::gen::Transaction::Record transaction;
  if (!block::tlb::unpack_cell(trans_root, transaction)) {
    return td::Status::Error("invalid Transaction in block");
  }
  Ref<vm::Cell> msg = transaction.r1.in_msg->prefetch_ref();
  if (msg.is_null()) {
    return nullptr;
  }
  td::Bits256 in_msg_hash = msg->get_hash().bits();
  Ref<vm::CellSlice> in_msg = in_msg_descr.lookup(in_msg_hash);
  if (in_msg.is_null()) {
    return td::Status::Error(PSTRING() << "no InMsg in InMsgDescr for message with hash " << in_msg_hash.to_hex());
  }
  int tag = block::gen::t_InMsg.get_tag(*in_msg);
  if (tag != block::gen::InMsg::msg_import_imm && tag != block::gen::InMsg::msg_import_fin &&
      tag != block::gen::InMsg::msg_import_deferred_fin) {
    return nullptr;
  }
  Ref<vm::Cell> msg_env = in_msg->prefetch_ref();
  if (msg_env.is_null()) {
    return td::Status::Error(PSTRING() << "no MsgEnvelope in InMsg for message with hash " << in_msg_hash.to_hex());
  }
  block::tlb::MsgEnvelope::Record_std env;
  if (!block::tlb::unpack_cell(std::move(msg_env), env)) {
    return td::Status::Error(PSTRING() << "failed to unpack MsgEnvelope for message with hash " << in_msg_hash.to_hex());
  }
  if (!env.metadata) {
    return nullptr;
  }
  block::MsgMetadata& metadata = env.metadata.value();
  return create_tl_object<lite_api::liteServer_transactionMetadata>(
      0, metadata.depth,
      create_tl_object<lite_api::liteServer_accountId>(metadata.initiator_wc, metadata.initiator_addr),
      metadata.initiator_lt);
}

void LiteQuery::finish_listBlockTransactions(int mode, int req_count) {
  LOG(INFO) << "completing a listBlockTransactions(" << base_blk_id_.to_str() << ", " << mode << ", " << req_count
            << ", " << acc_addr_.to_hex() << ", " << trans_lt_ << ") liteserver query";
@@ -2395,6 +2437,8 @@ void LiteQuery::finish_listBlockTransactions(int mode, int req_count) {
    acc_addr_.set_ones();
    trans_lt_ = ~0ULL;
  }
  bool with_metadata = mode & 256;
  mode &= ~256;
  std::vector<tl_object_ptr<lite_api::liteServer_transactionId>> result;
  bool eof = false;
  ton::LogicalTime reverse = (mode & 64) ? ~0ULL : 0;
@@ -2448,8 +2492,18 @@ void LiteQuery::finish_listBlockTransactions(int mode, int req_count) {
          trans_lt_ = reverse;
          break;
        }
        tl_object_ptr<lite_api::liteServer_transactionMetadata> metadata;
        if (with_metadata) {
          auto r_metadata = get_in_msg_metadata(extra.in_msg_descr, tvalue);
          if (r_metadata.is_error()) {
            fatal_error(r_metadata.move_as_error());
            return;
          }
          metadata = r_metadata.move_as_ok();
        }
        result.push_back(create_tl_object<lite_api::liteServer_transactionId>(
            mode | (metadata ? 256 : 0), cur_addr, cur_trans.to_long(), tvalue->get_hash().bits(),
            std::move(metadata)));
        ++count;
      }
    }
|
||||||
request_block_data(blkid);
|
request_block_data(blkid);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static td::Status process_all_in_msg_metadata(const Ref<vm::Cell>& in_msg_descr_root,
|
||||||
|
const std::vector<Ref<vm::Cell>>& trans_roots) {
|
||||||
|
vm::AugmentedDictionary in_msg_descr{vm::load_cell_slice_ref(in_msg_descr_root), 256, block::tlb::aug_InMsgDescr};
|
||||||
|
for (const Ref<vm::Cell>& trans_root : trans_roots) {
|
||||||
|
block::gen::Transaction::Record transaction;
|
||||||
|
if (!block::tlb::unpack_cell(trans_root, transaction)) {
|
||||||
|
return td::Status::Error("invalid Transaction in block");
|
||||||
|
}
|
||||||
|
Ref<vm::Cell> msg = transaction.r1.in_msg->prefetch_ref();
|
||||||
|
if (msg.is_null()) {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
td::Bits256 in_msg_hash = msg->get_hash().bits();
|
||||||
|
Ref<vm::CellSlice> in_msg = in_msg_descr.lookup(in_msg_hash);
|
||||||
|
if (in_msg.is_null()) {
|
||||||
|
return td::Status::Error(PSTRING() << "no InMsg in InMsgDescr for message with hash " << in_msg_hash.to_hex());
|
||||||
|
}
|
||||||
|
int tag = block::gen::t_InMsg.get_tag(*in_msg);
|
||||||
|
if (tag == block::gen::InMsg::msg_import_imm || tag == block::gen::InMsg::msg_import_fin ||
|
||||||
|
tag == block::gen::InMsg::msg_import_deferred_fin) {
|
||||||
|
Ref<vm::Cell> msg_env = in_msg->prefetch_ref();
|
||||||
|
if (msg_env.is_null()) {
|
||||||
|
return td::Status::Error(PSTRING() << "no MsgEnvelope in InMsg for message with hash " << in_msg_hash.to_hex());
|
||||||
|
}
|
||||||
|
vm::load_cell_slice(msg_env);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return td::Status::OK();
|
||||||
|
}
|
||||||
|
|
||||||
void LiteQuery::finish_listBlockTransactionsExt(int mode, int req_count) {
  LOG(INFO) << "completing a listBlockTransactionsExt(" << base_blk_id_.to_str() << ", " << mode << ", " << req_count
            << ", " << acc_addr_.to_hex() << ", " << trans_lt_ << ") liteserver query";
@@ -2495,6 +2579,10 @@ void LiteQuery::finish_listBlockTransactionsExt(int mode, int req_count) {
  CHECK(rhash == base_blk_id_.root_hash);
  vm::MerkleProofBuilder pb;
  auto virt_root = block_root;
  if (mode & 256) {
    // with msg metadata in proof
    mode |= 32;
  }
  if (mode & 32) {
    // proof requested
    virt_root = pb.init(std::move(virt_root));
@@ -2560,6 +2648,13 @@ void LiteQuery::finish_listBlockTransactionsExt(int mode, int req_count) {
        ++count;
      }
    }
    if (mode & 256) {
      td::Status S = process_all_in_msg_metadata(extra.in_msg_descr, trans_roots);
      if (S.is_error()) {
        fatal_error(S.move_as_error());
        return;
      }
    }
  } catch (vm::VmError err) {
    fatal_error("error while parsing AccountBlocks of block "s + base_blk_id_.to_str() + " : " + err.get_msg());
    return;
@@ -3252,7 +3347,7 @@ void LiteQuery::continue_getOutMsgQueueSizes(td::optional<ShardIdFull> shard, Re
  auto ig = mp.init_guard();
  for (size_t i = 0; i < blocks.size(); ++i) {
    td::actor::send_closure(manager_, &ValidatorManager::get_out_msg_queue_size, blocks[i],
                            [promise = ig.get_promise(), res, i, id = blocks[i]](td::Result<td::uint64> R) mutable {
                              TRY_RESULT_PROMISE(promise, value, std::move(R));
                              res->at(i) = create_tl_object<lite_api::liteServer_outMsgQueueSize>(
                                  create_tl_lite_block_id(id), value);
@@ -3271,6 +3366,73 @@ void LiteQuery::continue_getOutMsgQueueSizes(td::optional<ShardIdFull> shard, Re
  });
}

void LiteQuery::perform_getBlockOutMsgQueueSize(int mode, BlockIdExt blkid) {
  LOG(INFO) << "started a getBlockOutMsgQueueSize(" << blkid.to_str() << ", " << mode << ") liteserver query";
  mode_ = mode;
  if (!blkid.is_valid_full()) {
    fatal_error("invalid BlockIdExt");
    return;
  }
  set_continuation([=]() -> void { finish_getBlockOutMsgQueueSize(); });
  request_block_data_state(blkid);
}

void LiteQuery::finish_getBlockOutMsgQueueSize() {
  LOG(INFO) << "completing getBlockOutMsgQueueSize() query";
  bool with_proof = mode_ & 1;
  Ref<vm::Cell> state_root = state_->root_cell();
  vm::MerkleProofBuilder pb;
  if (with_proof) {
    pb = vm::MerkleProofBuilder{state_root};
    state_root = pb.root();
  }
  block::gen::ShardStateUnsplit::Record sstate;
  block::gen::OutMsgQueueInfo::Record out_msg_queue_info;
  if (!tlb::unpack_cell(state_root, sstate) || !tlb::unpack_cell(sstate.out_msg_queue_info, out_msg_queue_info)) {
    fatal_error("cannot unpack shard state");
    return;
  }
  vm::CellSlice& extra_slice = out_msg_queue_info.extra.write();
  if (extra_slice.fetch_long(1) == 0) {
    fatal_error("no out_msg_queue_size in shard state");
    return;
  }
  block::gen::OutMsgQueueExtra::Record out_msg_queue_extra;
  if (!tlb::unpack(extra_slice, out_msg_queue_extra)) {
    fatal_error("cannot unpack OutMsgQueueExtra");
    return;
  }
  vm::CellSlice& size_slice = out_msg_queue_extra.out_queue_size.write();
  if (size_slice.fetch_long(1) == 0) {
    fatal_error("no out_msg_queue_size in shard state");
    return;
  }
  td::uint64 size = size_slice.prefetch_ulong(48);

  td::BufferSlice proof;
  if (with_proof) {
    Ref<vm::Cell> proof1, proof2;
    if (!make_state_root_proof(proof1)) {
      return;
    }
    if (!pb.extract_proof_to(proof2)) {
      fatal_error("unknown error creating Merkle proof");
      return;
    }
    auto r_proof = vm::std_boc_serialize_multi({std::move(proof1), std::move(proof2)});
    if (r_proof.is_error()) {
      fatal_error(r_proof.move_as_error());
      return;
    }
    proof = r_proof.move_as_ok();
  }
  LOG(INFO) << "getBlockOutMsgQueueSize(" << blk_id_.to_str() << ", " << mode_ << ") query completed";
  auto b = ton::create_serialize_tl_object<ton::lite_api::liteServer_blockOutMsgQueueSize>(
      mode_, ton::create_tl_lite_block_id(blk_id_), size, std::move(proof));
  finish_query(std::move(b));
}

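The queue size itself is stored as (Maybe uint48) inside OutMsgQueueExtra; the function above reads one presence bit and then 48 bits of value. A standalone sketch of that decode step over a plain bit vector (illustrative only; the real code reads from a vm::CellSlice):

#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Read `len` bits starting at `pos` (most significant first) from a bit vector.
static uint64_t read_bits(const std::vector<bool>& bits, size_t pos, int len) {
  uint64_t v = 0;
  for (int i = 0; i < len; ++i) {
    v = (v << 1) | (bits[pos + i] ? 1 : 0);
  }
  return v;
}

static std::optional<uint64_t> decode_maybe_uint48(const std::vector<bool>& bits) {
  if (bits.empty() || !bits[0]) {
    return std::nullopt;  // Maybe bit is 0: no out_msg_queue_size stored
  }
  return read_bits(bits, 1, 48);
}

int main() {
  std::vector<bool> bits(49, false);
  bits[0] = true;   // present
  bits[48] = true;  // value = 1 (lowest of the 48 value bits)
  auto size = decode_maybe_uint48(bits);
  std::cout << (size ? *size : 0) << "\n";  // prints 1
}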
void LiteQuery::perform_nonfinal_getCandidate(td::Bits256 source, BlockIdExt blkid, td::Bits256 collated_data_hash) {
  LOG(INFO) << "started a nonfinal.getCandidate liteserver query";
  td::actor::send_closure_later(

@@ -170,6 +170,8 @@ class LiteQuery : public td::actor::Actor {
                                std::vector<std::pair<BlockIdExt, td::BufferSlice>> result);
  void perform_getOutMsgQueueSizes(td::optional<ShardIdFull> shard);
  void continue_getOutMsgQueueSizes(td::optional<ShardIdFull> shard, Ref<MasterchainState> state);
  void perform_getBlockOutMsgQueueSize(int mode, BlockIdExt blkid);
  void finish_getBlockOutMsgQueueSize();

  void perform_nonfinal_getCandidate(td::Bits256 source, BlockIdExt blkid, td::Bits256 collated_data_hash);
  void perform_nonfinal_getValidatorGroups(int mode, ShardIdFull shard);
File diff suppressed because it is too large
@@ -112,7 +112,8 @@ class ValidateQuery : public td::actor::Actor {
    return SUPPORTED_VERSION;
  }
  static constexpr long long supported_capabilities() {
    return ton::capCreateStatsEnabled | ton::capBounceMsgBody | ton::capReportVersion | ton::capShortDequeue |
           ton::capStoreOutMsgQueueSize | ton::capMsgMetadata | ton::capDeferMessages;
  }

 public:
@@ -227,9 +228,21 @@ class ValidateQuery : public td::actor::Actor {
  bool inbound_queues_empty_{false};

  std::vector<std::tuple<Bits256, LogicalTime, LogicalTime>> msg_proc_lt_;
  std::vector<std::tuple<Bits256, LogicalTime, LogicalTime>> msg_emitted_lt_;

  std::vector<std::tuple<Bits256, Bits256, bool>> lib_publishers_, lib_publishers2_;

  std::map<std::pair<StdSmcAddress, td::uint64>, Ref<vm::Cell>> removed_dispatch_queue_messages_;
  std::map<std::pair<StdSmcAddress, td::uint64>, Ref<vm::Cell>> new_dispatch_queue_messages_;
  std::set<StdSmcAddress> account_expected_defer_all_messages_;
  td::uint64 old_out_msg_queue_size_ = 0, new_out_msg_queue_size_ = 0;

  bool msg_metadata_enabled_ = false;
  bool deferring_messages_enabled_ = false;
  bool store_out_msg_queue_size_ = false;

  bool have_unprocessed_account_dispatch_queue_ = false;

  td::PerfWarningTimer perf_timer_;

  static constexpr td::uint32 priority() {
@@ -309,6 +322,8 @@ class ValidateQuery : public td::actor::Actor {
  bool check_cur_validator_set();
  bool check_mc_validator_info(bool update_mc_cc);
  bool check_utime_lt();
  bool prepare_out_msg_queue_size();
  void got_out_queue_size(size_t i, td::Result<td::uint64> res);

  bool fix_one_processed_upto(block::MsgProcessedUpto& proc, ton::ShardIdFull owner, bool allow_cur = false);
  bool fix_processed_upto(block::MsgProcessedUptoCollection& upto, bool allow_cur = false);
@@ -330,6 +345,9 @@ class ValidateQuery : public td::actor::Actor {
  bool precheck_one_message_queue_update(td::ConstBitPtr out_msg_id, Ref<vm::CellSlice> old_value,
                                         Ref<vm::CellSlice> new_value);
  bool precheck_message_queue_update();
  bool check_account_dispatch_queue_update(td::Bits256 addr, Ref<vm::CellSlice> old_queue_csr,
                                           Ref<vm::CellSlice> new_queue_csr);
  bool unpack_dispatch_queue_update();
  bool update_max_processed_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash);
  bool update_min_enqueued_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash);
  bool check_imported_message(Ref<vm::Cell> msg_env);
@@ -338,6 +356,7 @@ class ValidateQuery : public td::actor::Actor {
  bool check_in_msg_descr();
  bool check_out_msg(td::ConstBitPtr key, Ref<vm::CellSlice> out_msg);
  bool check_out_msg_descr();
  bool check_dispatch_queue_update();
  bool check_processed_upto();
  bool check_neighbor_outbound_message(Ref<vm::CellSlice> enq_msg, ton::LogicalTime lt, td::ConstBitPtr key,
                                       const block::McShardDescr& src_nb, bool& unprocessed);
@@ -384,7 +384,7 @@ class ValidatorManagerImpl : public ValidatorManager {
  void log_new_validator_group_stats(validatorsession::NewValidatorGroupStats stats) override {
    UNREACHABLE();
  }
  void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise) override {
    if (queue_size_counter_.empty()) {
      queue_size_counter_ =
          td::actor::create_actor<QueueSizeCounter>("queuesizecounter", td::Ref<MasterchainState>{}, actor_id(this));

@@ -446,7 +446,7 @@ class ValidatorManagerImpl : public ValidatorManager {
  void log_new_validator_group_stats(validatorsession::NewValidatorGroupStats stats) override {
    UNREACHABLE();
  }
  void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise) override {
    if (queue_size_counter_.empty()) {
      queue_size_counter_ =
          td::actor::create_actor<QueueSizeCounter>("queuesizecounter", td::Ref<MasterchainState>{}, actor_id(this));

@@ -590,7 +590,7 @@ class ValidatorManagerImpl : public ValidatorManager {

  void update_options(td::Ref<ValidatorManagerOptions> opts) override;

  void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise) override {
    if (queue_size_counter_.empty()) {
      if (last_masterchain_state_.is_null()) {
        promise.set_error(td::Status::Error(ErrorCode::notready, "not ready"));
@@ -23,8 +23,8 @@

namespace ton::validator {

static td::Result<td::uint64> calc_queue_size(const td::Ref<ShardState> &state) {
  td::uint64 size = 0;
  TRY_RESULT(outq_descr, state->message_queue());
  block::gen::OutMsgQueueInfo::Record qinfo;
  if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) {
@@ -41,8 +41,8 @@ static td::Result<td::uint32> calc_queue_size(const td::Ref<ShardState> &state)
  return size;
}

static td::Result<td::uint64> recalc_queue_size(const td::Ref<ShardState> &state, const td::Ref<ShardState> &prev_state,
                                                td::uint64 prev_size) {
  TRY_RESULT(outq_descr, state->message_queue());
  block::gen::OutMsgQueueInfo::Record qinfo;
  if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) {
@@ -56,7 +56,7 @@ static td::Result<td::uint32> recalc_queue_size(const td::Ref<ShardState> &state
    return td::Status::Error("invalid message queue");
  }
  vm::AugmentedDictionary prev_queue{prev_qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue};
  td::uint64 add = 0, rem = 0;
  bool ok = prev_queue.scan_diff(
      queue, [&](td::ConstBitPtr, int, td::Ref<vm::CellSlice> prev_val, td::Ref<vm::CellSlice> new_val) -> bool {
        if (prev_val.not_null()) {
@@ -88,11 +88,11 @@ void QueueSizeCounter::start_up() {
  alarm();
}

void QueueSizeCounter::get_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise) {
  get_queue_size_ex(block_id, simple_mode_ || is_block_too_old(block_id), std::move(promise));
}

void QueueSizeCounter::get_queue_size_ex(ton::BlockIdExt block_id, bool calc_whole, td::Promise<td::uint64> promise) {
  Entry &entry = results_[block_id];
  if (entry.done_) {
    promise.set_result(entry.queue_size_);
@@ -152,12 +152,12 @@ void QueueSizeCounter::get_queue_size_cont(BlockHandle handle, td::Ref<ShardStat
  }

  auto prev_block_id = handle->one_prev(true);
  get_queue_size(prev_block_id, [=, SelfId = actor_id(this), manager = manager_](td::Result<td::uint64> R) {
    if (R.is_error()) {
      td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, state->get_block_id(), R.move_as_error());
      return;
    }
    td::uint64 prev_size = R.move_as_ok();
    td::actor::send_closure(
        manager, &ValidatorManager::wait_block_state_short, prev_block_id, 0, td::Timestamp::in(10.0),
        [=](td::Result<td::Ref<ShardState>> R) {
@@ -171,7 +171,7 @@ void QueueSizeCounter::get_queue_size_cont(BlockHandle handle, td::Ref<ShardStat
}

void QueueSizeCounter::get_queue_size_cont2(td::Ref<ShardState> state, td::Ref<ShardState> prev_state,
                                            td::uint64 prev_size) {
  BlockIdExt block_id = state->get_block_id();
  Entry &entry = results_[block_id];
  CHECK(entry.started_);
@@ -252,7 +252,7 @@ void QueueSizeCounter::process_top_shard_blocks_cont(td::Ref<MasterchainState> s

void QueueSizeCounter::get_queue_size_ex_retry(BlockIdExt block_id, bool calc_whole, td::Promise<td::Unit> promise) {
  get_queue_size_ex(block_id, calc_whole,
                    [=, promise = std::move(promise), SelfId = actor_id(this)](td::Result<td::uint64> R) mutable {
                      if (R.is_error()) {
                        LOG(WARNING) << "Failed to calculate queue size for block " << block_id.to_str() << ": "
                                     << R.move_as_error();
@@ -26,7 +26,7 @@ class QueueSizeCounter : public td::actor::Actor {
  }

  void start_up() override;
  void get_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise);
  void alarm() override;

 private:
@@ -42,14 +42,14 @@ class QueueSizeCounter : public td::actor::Actor {
    bool started_ = false;
    bool done_ = false;
    bool calc_whole_ = false;
    td::uint64 queue_size_ = 0;
    std::vector<td::Promise<td::uint64>> promises_;
  };
  std::map<BlockIdExt, Entry> results_;

  void get_queue_size_ex(BlockIdExt block_id, bool calc_whole, td::Promise<td::uint64> promise);
  void get_queue_size_cont(BlockHandle handle, td::Ref<ShardState> state);
  void get_queue_size_cont2(td::Ref<ShardState> state, td::Ref<ShardState> prev_state, td::uint64 prev_size);
  void on_error(BlockIdExt block_id, td::Status error);

  void process_top_shard_blocks();
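The uint32-to-uint64 change above also touches the incremental size tracking: recalc_queue_size starts from the previous block's size and applies only the diff between the two queues (entries added minus entries removed). A self-contained sketch of that idea over plain std::map snapshots (illustrative only; the real code diffs augmented dictionaries with scan_diff):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using Queue = std::map<std::string, int>;  // key -> enqueued message (placeholder)

// new_size = prev_size + |added keys| - |removed keys|
static uint64_t recalc_queue_size(const Queue& prev, const Queue& cur, uint64_t prev_size) {
  uint64_t add = 0, rem = 0;
  for (const auto& [k, v] : cur) {
    if (!prev.count(k)) {
      ++add;
    }
  }
  for (const auto& [k, v] : prev) {
    if (!cur.count(k)) {
      ++rem;
    }
  }
  return prev_size + add - rem;
}

int main() {
  Queue prev{{"a", 1}, {"b", 2}};
  Queue cur{{"b", 2}, {"c", 3}, {"d", 4}};
  std::cout << recalc_queue_size(prev, cur, 2) << "\n";  // 2 + 2 - 1 = 3
}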
@@ -249,7 +249,7 @@ class ValidatorManagerInterface : public td::actor::Actor {

  virtual void prepare_perf_timer_stats(td::Promise<std::vector<PerfTimerStats>> promise) = 0;
  virtual void add_perf_timer_stat(std::string name, double duration) = 0;
  virtual void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint64> promise) = 0;

  virtual void update_options(td::Ref<ValidatorManagerOptions> opts) = 0;
};