From 53fd88b67a04ed03698da1e693cd66ba9ffccabc Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 26 Apr 2023 09:15:16 +0300 Subject: [PATCH 01/71] Add timeout for out_msg_queue_cleanup (#679) Co-authored-by: SpyCheese --- validator/impl/collator-impl.h | 2 +- validator/impl/collator.cpp | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index dfe844a3..7b136953 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -73,7 +73,7 @@ class Collator final : public td::actor::Actor { Ref validator_set_; td::actor::ActorId manager; td::Timestamp timeout; - td::Timestamp soft_timeout_, medium_timeout_; + td::Timestamp queue_cleanup_timeout_, soft_timeout_, medium_timeout_; td::Promise main_promise; ton::BlockSeqno last_block_seqno{0}; ton::BlockSeqno prev_mc_block_seqno{0}; diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 83bc710c..6debf2de 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -67,6 +67,7 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockId , validator_set_(std::move(validator_set)) , manager(manager) , timeout(timeout) + , queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 5.0)) , soft_timeout_(td::Timestamp::at(timeout.at() - 3.0)) , medium_timeout_(td::Timestamp::at(timeout.at() - 1.5)) , main_promise(std::move(promise)) @@ -1814,6 +1815,11 @@ bool Collator::out_msg_queue_cleanup() { auto res = out_msg_queue_->filter([&](vm::CellSlice& cs, td::ConstBitPtr key, int n) -> int { assert(n == 352); // LOG(DEBUG) << "key is " << key.to_hex(n); + if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) { + LOG(WARNING) << "cleaning up outbound queue takes too long, ending"; + outq_cleanup_partial_ = true; + return (1 << 30) + 1; // retain all remaining outbound queue entries including this one without processing + } if (block_full_) { LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially"; outq_cleanup_partial_ = true; From f44dce93b632e9c1cd28baa7da079895db2c7c7f Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 26 Apr 2023 21:32:26 +0300 Subject: [PATCH 02/71] Make stricter cleanup timelimit --- validator/impl/collator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 6debf2de..6935f83c 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -67,7 +67,7 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockId , validator_set_(std::move(validator_set)) , manager(manager) , timeout(timeout) - , queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 5.0)) + , queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 9.0)) , soft_timeout_(td::Timestamp::at(timeout.at() - 3.0)) , medium_timeout_(td::Timestamp::at(timeout.at() - 1.5)) , main_promise(std::move(promise)) From 6786f1d5b4bda03e044f57fb210944f7ee6147e9 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Thu, 27 Apr 2023 14:43:37 +0300 Subject: [PATCH 03/71] Comment out excessive checks of outbound messages queue --- validator/impl/collator.cpp | 1 + validator/impl/validate-query.cpp | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 6935f83c..1c3cafe6 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -67,6 +67,7 @@ 
Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockId , validator_set_(std::move(validator_set)) , manager(manager) , timeout(timeout) + // default timeout is 10 seconds, declared in validator/validator-group.cpp:generate_block_candidate:run_collate_query , queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 9.0)) , soft_timeout_(td::Timestamp::at(timeout.at() - 3.0)) , medium_timeout_(td::Timestamp::at(timeout.at() - 1.5)) diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 7d8104e0..0e1b1199 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -5540,9 +5540,10 @@ bool ValidateQuery::try_validate() { if (!check_in_queue()) { return reject_query("cannot check inbound message queues"); } - if (!check_delivered_dequeued()) { + // Excessive check: validity of message in queue is checked elsewhere + /*if (!check_delivered_dequeued()) { return reject_query("cannot check delivery status of all outbound messages"); - } + }*/ if (!check_transactions()) { return reject_query("invalid collection of account transactions in ShardAccountBlocks"); } From 67e64e5d157d1d78311817ec37b866fe4116e64d Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 28 Apr 2023 16:42:11 +0300 Subject: [PATCH 04/71] Revert too strictening of queue_cleanup_timeout --- validator/impl/collator.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 1c3cafe6..4e68b07c 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -68,7 +68,7 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockId , manager(manager) , timeout(timeout) // default timeout is 10 seconds, declared in validator/validator-group.cpp:generate_block_candidate:run_collate_query - , queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 9.0)) + , queue_cleanup_timeout_(td::Timestamp::at(timeout.at() - 5.0)) , soft_timeout_(td::Timestamp::at(timeout.at() - 3.0)) , medium_timeout_(td::Timestamp::at(timeout.at() - 1.5)) , main_promise(std::move(promise)) From daa714552c4e10e8921de5d16eedb18bd50a935a Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 3 May 2023 14:18:18 +0300 Subject: [PATCH 05/71] Increase limit for t_Block.validate, simplify validating StateInit (#691) Co-authored-by: SpyCheese --- crypto/block/block-parse.cpp | 32 +++++++++++++++++++++++++++++++ crypto/block/block-parse.h | 4 ++++ crypto/block/block.tlb | 9 +++++++-- crypto/block/transaction.cpp | 9 +++++++-- validator/impl/collator.cpp | 11 +++++++++-- validator/impl/validate-query.cpp | 8 ++++---- 6 files changed, 63 insertions(+), 10 deletions(-) diff --git a/crypto/block/block-parse.cpp b/crypto/block/block-parse.cpp index e9eb8209..7d51b2e2 100644 --- a/crypto/block/block-parse.cpp +++ b/crypto/block/block-parse.cpp @@ -2292,5 +2292,37 @@ bool Aug_ShardFees::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const { const Aug_ShardFees aug_ShardFees; +bool validate_message_libs(const td::Ref &cell) { + gen::Message::Record rec; + if (!type_unpack_cell(cell, gen::t_Message_Any, rec)) { + return false; + } + vm::CellSlice& state_init = rec.init.write(); + if (!state_init.fetch_long(1)) { + return true; + } + if (state_init.fetch_long(1)) { + return gen::t_StateInitWithLibs.validate_ref(state_init.prefetch_ref()); + } else { + return gen::t_StateInitWithLibs.validate_csr(rec.init); + } +} + +bool validate_message_relaxed_libs(const td::Ref &cell) { + 
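  // Note (descriptive): this mirrors validate_message_libs above, but for MessageRelaxed,
  // i.e. outbound messages. The init field is (Maybe (Either StateInit ^StateInit)): the
  // first fetched bit is the Maybe flag, the second selects the Either branch (1 = StateInit
  // stored in a reference, 0 = StateInit stored inline). Both branches are checked against
  // t_StateInitWithLibs so the library dictionary is validated with the stricter type.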
gen::MessageRelaxed::Record rec; + if (!type_unpack_cell(cell, gen::t_MessageRelaxed_Any, rec)) { + return false; + } + vm::CellSlice& state_init = rec.init.write(); + if (!state_init.fetch_long(1)) { + return true; + } + if (state_init.fetch_long(1)) { + return gen::t_StateInitWithLibs.validate_ref(state_init.prefetch_ref()); + } else { + return gen::t_StateInitWithLibs.validate_csr(rec.init); + } +} + } // namespace tlb } // namespace block diff --git a/crypto/block/block-parse.h b/crypto/block/block-parse.h index ad4faec0..c0b11745 100644 --- a/crypto/block/block-parse.h +++ b/crypto/block/block-parse.h @@ -1113,5 +1113,9 @@ struct Aug_ShardFees final : AugmentationCheckData { extern const Aug_ShardFees aug_ShardFees; +// Validate dict of libraries in message: used when sending and receiving message +bool validate_message_libs(const td::Ref &cell); +bool validate_message_relaxed_libs(const td::Ref &cell); + } // namespace tlb } // namespace block diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index 8662e243..25e50a23 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -143,8 +143,13 @@ tick_tock$_ tick:Bool tock:Bool = TickTock; _ split_depth:(Maybe (## 5)) special:(Maybe TickTock) code:(Maybe ^Cell) data:(Maybe ^Cell) - library:(HashmapE 256 SimpleLib) = StateInit; - + library:(Maybe ^Cell) = StateInit; + +// StateInitWithLibs is used to validate sent and received messages +_ split_depth:(Maybe (## 5)) special:(Maybe TickTock) + code:(Maybe ^Cell) data:(Maybe ^Cell) + library:(HashmapE 256 SimpleLib) = StateInitWithLibs; + simple_lib$_ public:Bool root:^Cell = SimpleLib; message$_ {X:Type} info:CommonMsgInfo diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index fcabf0c0..1a8f111c 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -635,14 +635,15 @@ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* vm::CellBuilder cb; if (!(cs.advance(2) && block::gen::t_StateInit.fetch_to(cs, state_init) && cb.append_cellslice_bool(std::move(state_init)) && cb.finalize_to(in_msg_state) && - block::gen::t_StateInit.validate_ref(in_msg_state))) { + block::gen::t_StateInitWithLibs.validate_ref(in_msg_state))) { LOG(DEBUG) << "cannot parse StateInit in inbound message"; return false; } break; } case 3: { // (just$1 (right$1 _:^StateInit )) - if (!(cs.advance(2) && cs.fetch_ref_to(in_msg_state) && block::gen::t_StateInit.validate_ref(in_msg_state))) { + if (!(cs.advance(2) && cs.fetch_ref_to(in_msg_state) && + block::gen::t_StateInitWithLibs.validate_ref(in_msg_state))) { LOG(DEBUG) << "cannot parse ^StateInit in inbound message"; return false; } @@ -1534,6 +1535,10 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, if (!tlb::type_unpack_cell(act_rec.out_msg, block::gen::t_MessageRelaxed_Any, msg)) { return -1; } + if (!block::tlb::validate_message_relaxed_libs(act_rec.out_msg)) { + LOG(DEBUG) << "outbound message has invalid libs in StateInit"; + return -1; + } if (redoing >= 1) { if (msg.init->size_refs() >= 2) { LOG(DEBUG) << "moving the StateInit of a suggested outbound message into a separate cell"; diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 4e68b07c..9a6c512b 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -2565,7 +2565,7 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT return false; } if (!block::tlb::t_MsgEnvelope.validate_ref(msg_env)) { - LOG(ERROR) << "inbound internal 
MsgEnvelope is invalid according to automated checks"; + LOG(ERROR) << "inbound internal MsgEnvelope is invalid according to hand-written checks"; return false; } // 1. unpack MsgEnvelope @@ -2590,6 +2590,10 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT "its contents"; return false; } + if (!block::tlb::validate_message_libs(env.msg)) { + LOG(ERROR) << "inbound internal message has invalid StateInit"; + return false; + } // 2.0. update last_proc_int_msg if (!update_last_proc_int_msg(std::pair(lt, env.msg->get_hash().bits()))) { return fatal_error("processing a message AFTER a newer message has been processed"); @@ -3876,7 +3880,7 @@ bool Collator::create_block() { } if (verify >= 1) { LOG(INFO) << "verifying new Block"; - if (!block::gen::t_Block.validate_ref(1000000, new_block)) { + if (!block::gen::t_Block.validate_ref(10000000, new_block)) { return fatal_error("new Block failed to pass automatic validity tests"); } } @@ -4030,6 +4034,9 @@ td::Result Collator::register_external_message_cell(Ref ext_msg, if (!block::tlb::t_Message.validate_ref(256, ext_msg)) { return td::Status::Error("external message is not a (Message Any) according to hand-written checks"); } + if (!block::tlb::validate_message_libs(ext_msg)) { + return td::Status::Error("external message has invalid libs in StateInit"); + } block::gen::CommonMsgInfo::Record_ext_in_msg_info info; if (!tlb::unpack_cell_inexact(ext_msg, info)) { return td::Status::Error("cannot unpack external message header"); diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 0e1b1199..b47254c3 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -2111,13 +2111,13 @@ bool ValidateQuery::unpack_block_data() { auto outmsg_cs = vm::load_cell_slice_ref(std::move(extra.out_msg_descr)); // run some hand-written checks from block::tlb:: // (automatic tests from block::gen:: have been already run for the entire block) - if (!block::tlb::t_InMsgDescr.validate_upto(1000000, *inmsg_cs)) { + if (!block::tlb::t_InMsgDescr.validate_upto(10000000, *inmsg_cs)) { return reject_query("InMsgDescr of the new block failed to pass handwritten validity tests"); } - if (!block::tlb::t_OutMsgDescr.validate_upto(1000000, *outmsg_cs)) { + if (!block::tlb::t_OutMsgDescr.validate_upto(10000000, *outmsg_cs)) { return reject_query("OutMsgDescr of the new block failed to pass handwritten validity tests"); } - if (!block::tlb::t_ShardAccountBlocks.validate_ref(1000000, extra.account_blocks)) { + if (!block::tlb::t_ShardAccountBlocks.validate_ref(10000000, extra.account_blocks)) { return reject_query("ShardAccountBlocks of the new block failed to pass handwritten validity tests"); } in_msg_dict_ = std::make_unique(std::move(inmsg_cs), 256, block::tlb::aug_InMsgDescr); @@ -5507,7 +5507,7 @@ bool ValidateQuery::try_validate() { } } LOG(INFO) << "running automated validity checks for block candidate " << id_.to_str(); - if (!block::gen::t_Block.validate_ref(1000000, block_root_)) { + if (!block::gen::t_Block.validate_ref(10000000, block_root_)) { return reject_query("block "s + id_.to_str() + " failed to pass automated validity checks"); } if (!fix_all_processed_upto()) { From 260c2c9ad65b69c0760f0506d9a8811823148cdc Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Thu, 4 May 2023 14:45:42 +0300 Subject: [PATCH 06/71] Account for unprocessed messages in estimate_block_size; check consensus_config limits in collator (#692) Co-authored-by: SpyCheese --- crypto/block/block.cpp | 2 +- 
crypto/block/block.h | 3 ++- validator/impl/collator.cpp | 14 ++++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/crypto/block/block.cpp b/crypto/block/block.cpp index 9e2caef5..e1790cbf 100644 --- a/crypto/block/block.cpp +++ b/crypto/block/block.cpp @@ -715,7 +715,7 @@ td::uint64 BlockLimitStatus::estimate_block_size(const vm::NewCellStorageStat::S sum += *extra; } return 2000 + (sum.bits >> 3) + sum.cells * 12 + sum.internal_refs * 3 + sum.external_refs * 40 + accounts * 200 + - transactions * 200 + (extra ? 200 : 0); + transactions * 200 + (extra ? 200 : 0) + extra_out_msgs * 300; } int BlockLimitStatus::classify() const { diff --git a/crypto/block/block.h b/crypto/block/block.h index 6c460e31..f5b47a63 100644 --- a/crypto/block/block.h +++ b/crypto/block/block.h @@ -261,7 +261,7 @@ struct BlockLimitStatus { ton::LogicalTime cur_lt; td::uint64 gas_used{}; vm::NewCellStorageStat st_stat; - unsigned accounts{}, transactions{}; + unsigned accounts{}, transactions{}, extra_out_msgs{}; BlockLimitStatus(const BlockLimits& limits_, ton::LogicalTime lt = 0) : limits(limits_), cur_lt(std::max(limits_.start_lt, lt)) { } @@ -270,6 +270,7 @@ struct BlockLimitStatus { st_stat.set_zero(); transactions = accounts = 0; gas_used = 0; + extra_out_msgs = 0; } td::uint64 estimate_block_size(const vm::NewCellStorageStat::Stat* extra = nullptr) const; int classify() const; diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 9a6c512b..34dfca61 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -2961,6 +2961,7 @@ bool Collator::process_new_messages(bool enqueue_only) { while (!new_msgs.empty()) { block::NewOutMsg msg = new_msgs.top(); new_msgs.pop(); + block_limit_status_->extra_out_msgs--; if (block_full_ && !enqueue_only) { LOG(INFO) << "BLOCK FULL, enqueue all remaining new messages"; enqueue_only = true; @@ -2982,6 +2983,7 @@ void Collator::register_new_msg(block::NewOutMsg new_msg) { min_new_msg_lt = new_msg.lt; } new_msgs.push(std::move(new_msg)); + block_limit_status_->extra_out_msgs++; } void Collator::register_new_msgs(block::transaction::Transaction& trans) { @@ -3972,6 +3974,18 @@ bool Collator::create_block_candidate() { ton::BlockIdExt{ton::BlockId{shard_, new_block_seqno}, new_block->get_hash().bits(), block::compute_file_hash(blk_slice.as_slice())}, block::compute_file_hash(cdata_slice.as_slice()), blk_slice.clone(), cdata_slice.clone()); + // 3.1 check block and collated data size + auto consensus_config = config_->get_consensus_config(); + if (block_candidate->data.size() > consensus_config.max_block_size) { + return fatal_error(PSTRING() << "block size (" << block_candidate->data.size() + << ") exceeds the limit in consensus config (" << consensus_config.max_block_size + << ")"); + } + if (block_candidate->collated_data.size() > consensus_config.max_collated_data_size) { + return fatal_error(PSTRING() << "collated data size (" << block_candidate->collated_data.size() + << ") exceeds the limit in consensus config (" + << consensus_config.max_collated_data_size << ")"); + } // 4. 
save block candidate LOG(INFO) << "saving new BlockCandidate"; td::actor::send_closure_later(manager, &ValidatorManager::set_block_candidate, block_candidate->id, From fa6b4c065888168139232549465088e90d9e04e8 Mon Sep 17 00:00:00 2001 From: sapientisatus <112001068+sapientisatus@users.noreply.github.com> Date: Wed, 24 May 2023 08:37:22 +0100 Subject: [PATCH 07/71] Update README.md (#702) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6bc3b1f5..0d4ebd98 100644 --- a/README.md +++ b/README.md @@ -37,7 +37,7 @@ Main TON monorepo, which includes the code of the node/validator, lite-client, t ## The Open Network __The Open Network (TON)__ is a fast, secure, scalable blockchain focused on handling _millions of transactions per second_ (TPS) with the goal of reaching hundreds of millions of blockchain users. -- To learn more about different aspects of TON blockchain and its underlying ecosystem check [documentation](ton.org/docs) +- To learn more about different aspects of TON blockchain and its underlying ecosystem check [documentation](https://ton.org/docs) - To run node, validator or lite-server check [Participate section](https://ton.org/docs/participate/nodes/run-node) - To develop decentralised apps check [Tutorials](https://ton.org/docs/develop/smart-contracts/), [FunC docs](https://ton.org/docs/develop/func/overview) and [DApp tutorials](https://ton.org/docs/develop/dapps/) - To work on TON check [wallets](https://ton.app/wallets), [explorers](https://ton.app/explorers), [DEXes](https://ton.app/dex) and [utilities](https://ton.app/utilities) From 8591886ee6f634d84a5fcbd6242d71cc13e35864 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 31 May 2023 16:19:37 +0300 Subject: [PATCH 08/71] Add 2023.06 update changelog --- Changelog.md | 6 ++++++ recent_changelog.md | 11 ++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/Changelog.md b/Changelog.md index 0f0cd833..1deebc61 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,3 +1,9 @@ +## 2023.06 Update +1. (disabled by default) New deflation mechanisms: partial fee burning and blackhole address +2. Storage-contract improvement + +Besides the work of the core team, this update is based on the efforts of @DearJohnDoe from Tonbyte (Storage-contract improvement). + ## 2023.05 Update 1. Archive manager optimization 2. A series of catchain (basic consensus protocol) security improvements diff --git a/recent_changelog.md b/recent_changelog.md index 39ca5c94..fe2c34b2 100644 --- a/recent_changelog.md +++ b/recent_changelog.md @@ -1,8 +1,5 @@ -## 2023.05 Update -1. Archive manager optimization -2. A series of catchain (basic consensus protocol) security improvements -3. Update for Fift libraries and FunC: better error-handling, fixes for `catch` stack recovery -4. A series of out message queue handling optimization (already deployed during emergency upgrades between releases) -5. Improvement of binaries portability +## 2023.06 Update +1. (disabled by default) New deflation mechanisms: partial fee burning and blackhole address +2. Storage-contract improvement -Besides the work of the core team, this update is based on the efforts of @aleksej-paschenko (portability improvement), [Disintar team](https://github.com/disintar/) (archive manager optimization) and [sec3-service](https://github.com/sec3-service) security auditors (funC improvements). 
+Besides the work of the core team, this update is based on the efforts of @DearJohnDoe from Tonbyte (Storage-contract improvement). From 93bbe3902e3d413d6000ce21d532bc59453d3691 Mon Sep 17 00:00:00 2001 From: neodiX Date: Thu, 1 Jun 2023 10:42:56 +0200 Subject: [PATCH 09/71] Fix windows build --- .github/workflows/win-2019-compile.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml index b0c99971..5a1f74a2 100644 --- a/.github/workflows/win-2019-compile.yml +++ b/.github/workflows/win-2019-compile.yml @@ -40,8 +40,8 @@ jobs: - name: Install pre-compiled OpenSSL Win64 run: | - curl -Lo openssl-1.1.1o.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1o.zip - jar xf openssl-1.1.1o.zip + curl -Lo openssl-1.1.1j.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1j.zip + jar xf openssl-1.1.1j.zip - name: Install pre-compiled libmicrohttpd Win64 run: | @@ -54,7 +54,7 @@ jobs: echo %root% mkdir build cd build - cmake -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1/x64/lib/libcrypto.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. + cmake -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1.1j/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1.1j/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" .. cmake --build . 
--target storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork --config Release - name: Show executables From 872d38bff5c119e93042a63865ec7d1aad7b8887 Mon Sep 17 00:00:00 2001 From: neodiX Date: Thu, 1 Jun 2023 11:55:04 +0200 Subject: [PATCH 10/71] Update libmicrohttp library in Windows build --- .github/workflows/win-2019-compile.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml index 5a1f74a2..26ed2ecb 100644 --- a/.github/workflows/win-2019-compile.yml +++ b/.github/workflows/win-2019-compile.yml @@ -45,8 +45,8 @@ jobs: - name: Install pre-compiled libmicrohttpd Win64 run: | - curl -Lo libmicrohttpd-latest-w32-bin.zip https://ftpmirror.gnu.org/libmicrohttpd/libmicrohttpd-latest-w32-bin.zip - unzip libmicrohttpd-latest-w32-bin.zip + curl -Lo libmicrohttpd-0.9.77-w32-bin.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip + unzip libmicrohttpd-0.9.77-w32-bin.zip - name: Compile run: | @@ -54,7 +54,7 @@ jobs: echo %root% mkdir build cd build - cmake -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.75-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1.1j/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1.1j/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" .. + cmake -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1.1j/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1.1j/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" .. cmake --build . 
--target storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork --config Release - name: Show executables From 469fb08c49f5f526c0cb65c939171bb0f7e5a53e Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Tue, 27 Jun 2023 09:22:09 +0300 Subject: [PATCH 11/71] New tag for encrypted messages --- crypto/smc-envelope/WalletInterface.cpp | 2 +- tonlib/tonlib/TonlibClient.cpp | 42 +++++++++++++------------ 2 files changed, 23 insertions(+), 21 deletions(-) diff --git a/crypto/smc-envelope/WalletInterface.cpp b/crypto/smc-envelope/WalletInterface.cpp index 4c2feca9..418bc4a4 100644 --- a/crypto/smc-envelope/WalletInterface.cpp +++ b/crypto/smc-envelope/WalletInterface.cpp @@ -68,7 +68,7 @@ void WalletInterface::store_gift_message(vm::CellBuilder &cb, const Gift &gift) } if (gift.is_encrypted) { - cb.store_long(1, 32); + cb.store_long(0x2167da4b, 32); } else { cb.store_long(0, 32); } diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index a2dfa9cf..dfdc40e1 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -2871,29 +2871,31 @@ struct ToRawTransactions { auto get_data = [body = std::move(body), body_cell = std::move(body_cell), init_state_cell = std::move(init_state_cell), this](td::Slice salt) mutable { tonlib_api::object_ptr data; - if (try_decode_messages_ && body->size() >= 32 && static_cast(body->prefetch_long(32)) <= 1) { - auto type = body.write().fetch_long(32); - td::Status status; + if (try_decode_messages_ && body->size() >= 32) { + auto type = static_cast(body.write().fetch_long(32)); + if (type == 0 || type == 0x2167da4b) { + td::Status status; - auto r_body_message = vm::CellString::load(body.write()); - LOG_IF(WARNING, r_body_message.is_error()) << "Failed to parse a message: " << r_body_message.error(); + auto r_body_message = vm::CellString::load(body.write()); + LOG_IF(WARNING, r_body_message.is_error()) << "Failed to parse a message: " << r_body_message.error(); - if (r_body_message.is_ok()) { - if (type == 0) { - data = tonlib_api::make_object(r_body_message.move_as_ok()); - } else { - auto encrypted_message = r_body_message.move_as_ok(); - auto r_decrypted_message = [&]() -> td::Result { - if (!private_key_) { - return TonlibError::EmptyField("private_key"); - } - TRY_RESULT(decrypted, SimpleEncryptionV2::decrypt_data(encrypted_message, private_key_.value(), salt)); - return decrypted.data.as_slice().str(); - }(); - if (r_decrypted_message.is_ok()) { - data = tonlib_api::make_object(r_decrypted_message.move_as_ok()); + if (r_body_message.is_ok()) { + if (type == 0) { + data = tonlib_api::make_object(r_body_message.move_as_ok()); } else { - data = tonlib_api::make_object(encrypted_message); + auto encrypted_message = r_body_message.move_as_ok(); + auto r_decrypted_message = [&]() -> td::Result { + if (!private_key_) { + return TonlibError::EmptyField("private_key"); + } + TRY_RESULT(decrypted, SimpleEncryptionV2::decrypt_data(encrypted_message, private_key_.value(), salt)); + return decrypted.data.as_slice().str(); + }(); + if (r_decrypted_message.is_ok()) { + data = tonlib_api::make_object(r_decrypted_message.move_as_ok()); + } else { + data = tonlib_api::make_object(encrypted_message); + } } } } From e1197b13d43a082a48402bdbdeadab472087ad09 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Mon, 7 Aug 2023 19:48:33 +0300 
Subject: [PATCH 12/71] Fix parsing dict of public libraries (#762) --- validator/impl/validate-query.cpp | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 7a5934b4..851f3746 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -4783,27 +4783,27 @@ bool ValidateQuery::check_one_library_update(td::ConstBitPtr key, Ref old_publishers, new_publishers; if (new_value.not_null()) { - if (!block::gen::t_LibDescr.validate_csr(new_value)) { - return reject_query("LibDescr with key "s + key.to_hex(256) + - " in the libraries dictionary of the new state failed to pass automatic validity tests"); + block::gen::LibDescr::Record rec; + if (!block::gen::csr_unpack(std::move(new_value), rec)) { + return reject_query("failed to unpack LibDescr with key "s + key.to_hex(256) + + " in the libraries dictionary of the new state"); } - auto lib_ref = new_value->prefetch_ref(); - CHECK(lib_ref.not_null()); - if (lib_ref->get_hash().as_bitslice() != key) { + if (rec.lib->get_hash().as_bitslice() != key) { return reject_query("LibDescr with key "s + key.to_hex(256) + " in the libraries dictionary of the new state contains a library with different root hash " + - lib_ref->get_hash().to_hex()); + rec.lib->get_hash().to_hex()); } - CHECK(new_value.write().advance_ext(2, 1)); - new_publishers = std::make_unique(vm::DictNonEmpty(), std::move(new_value), 256); + new_publishers = std::make_unique(vm::DictNonEmpty(), std::move(rec.publishers), 256); } else { new_publishers = std::make_unique(256); } - if (old_value.not_null() && !block::gen::t_LibDescr.validate_csr(old_value)) { - return reject_query("LibDescr with key "s + key.to_hex(256) + - " in the libraries dictionary of the old state failed to pass automatic validity tests"); - CHECK(old_value.write().advance_ext(2, 1)); - old_publishers = std::make_unique(vm::DictNonEmpty(), std::move(old_value), 256); + if (old_value.not_null()) { + block::gen::LibDescr::Record rec; + if (!block::gen::csr_unpack(std::move(old_value), rec)) { + return reject_query("failed to unpack LibDescr with key "s + key.to_hex(256) + + " in the libraries dictionary of the old state"); + } + old_publishers = std::make_unique(vm::DictNonEmpty(), std::move(rec.publishers), 256); } else { old_publishers = std::make_unique(256); } From 65d22c46d9955041baad11a5dbd9a2c8c21beaf9 Mon Sep 17 00:00:00 2001 From: neodiX42 Date: Wed, 4 Oct 2023 21:56:33 +0300 Subject: [PATCH 13/71] Rework locking mechanism in blockchain-explorer. (#772) Mainly because it was causing crash on Windows with error "unlock of unowned mutex". 
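The Windows error comes from releasing a std::mutex on a thread that does not own it, which is undefined behaviour; the patch below switches HttpQueryRunner to a condition-variable wait instead. As a minimal, self-contained sketch of that wait/notify shape (class and member names here are illustrative rather than taken from the explorer code; a separate done_ flag is used so that a legitimately null result still wakes the waiter):

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

// One thread publishes a result with finish(); another blocks in wait() until it is set.
class ResultWaiter {
 public:
  void finish(void* response) {
    std::lock_guard<std::mutex> lock(mutex_);
    response_ = response;
    done_ = true;
    cond_.notify_all();
  }
  void* wait() {
    std::unique_lock<std::mutex> lock(mutex_);
    cond_.wait(lock, [&] { return done_; });  // rechecked under the lock, robust to spurious wakeups
    return response_;
  }

 private:
  bool done_{false};
  void* response_{nullptr};
  std::mutex mutex_;
  std::condition_variable cond_;
};

int main() {
  ResultWaiter waiter;
  int value = 42;
  std::thread producer([&] { waiter.finish(&value); });
  std::printf("got %d\n", *static_cast<int*>(waiter.wait()));
  producer.join();
}

Unlike the lock/unlock handoff it replaces, every lock here is acquired and released on the same thread, and the waiter re-checks its predicate under the lock, which is the portable way to hand a result between threads.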
--- blockchain-explorer/blockchain-explorer.cpp | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/blockchain-explorer/blockchain-explorer.cpp b/blockchain-explorer/blockchain-explorer.cpp index 035b84ea..3e7d9a97 100644 --- a/blockchain-explorer/blockchain-explorer.cpp +++ b/blockchain-explorer/blockchain-explorer.cpp @@ -103,23 +103,24 @@ class HttpQueryRunner { Self->finish(nullptr); } }); - mutex_.lock(); scheduler_ptr->run_in_context_external([&]() { func(std::move(P)); }); } void finish(MHD_Response* response) { + std::unique_lock lock(mutex_); response_ = response; - mutex_.unlock(); + cond.notify_all(); } MHD_Response* wait() { - mutex_.lock(); - mutex_.unlock(); + std::unique_lock lock(mutex_); + cond.wait(lock, [&]() { return response_ != nullptr; }); return response_; } private: std::function)> func_; - MHD_Response* response_; + MHD_Response* response_ = nullptr; std::mutex mutex_; + std::condition_variable cond; }; class CoreActor : public CoreActorInterface { From 866fbf936b2669cf337027d3de636facaf62e56f Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Thu, 12 Oct 2023 14:53:54 +0300 Subject: [PATCH 14/71] Explicitly handle special cells in action list (#777) --- crypto/block/transaction.cpp | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 0a02d6fd..e82b1eeb 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -943,7 +943,12 @@ int output_actions_count(Ref list) { int i = -1; do { ++i; - list = load_cell_slice(std::move(list)).prefetch_ref(); + bool special = true; + auto cs = load_cell_slice_special(std::move(list), special); + if (special) { + break; + } + list = cs.prefetch_ref(); } while (list.not_null()); return i; } @@ -1122,7 +1127,8 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { int out_act_num = output_actions_count(cp.actions); if (verbosity > 2) { std::cerr << "new smart contract data: "; - load_cell_slice(cp.new_data).print_rec(std::cerr); + bool can_be_special = true; + load_cell_slice_special(cp.new_data, can_be_special).print_rec(std::cerr); std::cerr << "output actions: "; block::gen::OutList{out_act_num}.print_ref(std::cerr, cp.actions); } @@ -1192,7 +1198,15 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { int n = 0; while (true) { ap.action_list.push_back(list); - auto cs = load_cell_slice(std::move(list)); + bool special = true; + auto cs = load_cell_slice_special(std::move(list), special); + if (special) { + ap.result_code = 32; // action list invalid + ap.result_arg = n; + ap.action_list_invalid = true; + LOG(DEBUG) << "action list invalid: special cell"; + return true; + } if (!cs.size_ext()) { break; } From 2f8e80ef5617aa546222564c62952ba9efd4a6d8 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Thu, 12 Oct 2023 14:54:14 +0300 Subject: [PATCH 15/71] Do not retranslate external messages with wrong initstate (#778) --- crypto/block/transaction.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index e82b1eeb..b1be19c4 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1065,6 +1065,11 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { } else if (in_msg_state.not_null()) { unpack_msg_state(true); // use only libraries } + if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) { + 
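    // The StateInit attached to an inbound message is expected to hash to the destination
    // account address (that is how addresses are derived from a StateInit), so a mismatch on
    // an external message means the message cannot be meant for this account. Marking the
    // compute phase as sk_bad_state rejects it up front, so such externals are not accepted
    // and, therefore, not retranslated to other nodes.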
LOG(DEBUG) << "in_msg_state hash mismatch in external message"; + cp.skip_reason = ComputePhase::sk_bad_state; + return true; + } // initialize VM Ref stack = prepare_vm_stack(cp); if (stack.is_null()) { From e1df0b3c9088097724b8feba29b7d60d3dba5a5c Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Thu, 12 Oct 2023 14:54:38 +0300 Subject: [PATCH 16/71] Check peers prior to saving it to local db (#779) --- overlay/overlay.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/overlay/overlay.cpp b/overlay/overlay.cpp index a562beb6..fcf766fe 100644 --- a/overlay/overlay.cpp +++ b/overlay/overlay.cpp @@ -291,7 +291,11 @@ void OverlayImpl::alarm() { if (peers_.size() > 0) { std::vector vec; for (td::uint32 i = 0; i < 20; i++) { - vec.push_back(get_random_peer()->get()); + auto P = get_random_peer(); + if (!P) { + break; + } + vec.push_back(P->get()); } td::actor::send_closure(manager_, &OverlayManager::save_to_db, local_id_, overlay_id_, std::move(vec)); } From 9a06a2ebfbdb2a8e18d45a3967fa51b96da4e2b3 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Thu, 12 Oct 2023 14:55:26 +0300 Subject: [PATCH 17/71] Human-readable timestamps in explorer (#776) --- .../blockchain-explorer-http.cpp | 40 ++++++++++++++++--- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/blockchain-explorer/blockchain-explorer-http.cpp b/blockchain-explorer/blockchain-explorer-http.cpp index 8642e900..e2322ff7 100644 --- a/blockchain-explorer/blockchain-explorer-http.cpp +++ b/blockchain-explorer/blockchain-explorer-http.cpp @@ -35,9 +35,38 @@ #include "vm/cells/MerkleProof.h" #include "block/mc-config.h" #include "ton/ton-shard.h" +#include "td/utils/date.h" bool local_scripts{false}; +static std::string time_to_human(unsigned ts) { + td::StringBuilder sb; + sb << date::format("%F %T", + std::chrono::time_point{std::chrono::seconds(ts)}) + << ", "; + auto now = (unsigned)td::Clocks::system(); + bool past = now >= ts; + unsigned x = past ? 
now - ts : ts - now; + if (!past) { + sb << "in "; + } + if (x < 60) { + sb << x << "s"; + } else if (x < 3600) { + sb << x / 60 << "m " << x % 60 << "s"; + } else if (x < 3600 * 24) { + x /= 60; + sb << x / 60 << "h " << x % 60 << "m"; + } else { + x /= 3600; + sb << x / 24 << "d " << x % 24 << "h"; + } + if (past) { + sb << " ago"; + } + return sb.as_cslice().str(); +} + HttpAnswer& HttpAnswer::operator<<(AddressCell addr_c) { ton::WorkchainId wc; ton::StdSmcAddress addr; @@ -84,7 +113,7 @@ HttpAnswer& HttpAnswer::operator<<(MessageCell msg) { << "source" << AddressCell{info.src} << "\n" << "destinationNONE\n" << "lt" << info.created_lt << "\n" - << "time" << info.created_at << "\n"; + << "time" << info.created_at << " (" << time_to_human(info.created_at) << ")\n"; break; } case block::gen::CommonMsgInfo::int_msg_info: { @@ -103,7 +132,7 @@ HttpAnswer& HttpAnswer::operator<<(MessageCell msg) { << "source" << AddressCell{info.src} << "\n" << "destination" << AddressCell{info.dest} << "\n" << "lt" << info.created_lt << "\n" - << "time" << info.created_at << "\n" + << "time" << info.created_at << " (" << time_to_human(info.created_at) << ")\n" << "value" << value << "\n"; break; } @@ -277,7 +306,7 @@ HttpAnswer& HttpAnswer::operator<<(TransactionCell trans_c) { << "account" << trans_c.addr.rserialize(true) << "" << "hash" << trans_c.root->get_hash().to_hex() << "\n" << "lt" << trans.lt << "\n" - << "time" << trans.now << "\n" + << "time" << trans.now << " (" << time_to_human(trans.now) << ")\n" << "out messages"; vm::Dictionary dict{trans.r1.out_msgs, 15}; for (td::int32 i = 0; i < trans.outmsg_cnt; i++) { @@ -456,7 +485,7 @@ HttpAnswer& HttpAnswer::operator<<(BlockHeaderCell head_c) { << "block" << block_id.id.to_str() << "\n" << "roothash" << block_id.root_hash.to_hex() << "\n" << "filehash" << block_id.file_hash.to_hex() << "\n" - << "time" << info.gen_utime << "\n" + << "time" << info.gen_utime << " (" << time_to_human(info.gen_utime) << ")\n" << "lt" << info.start_lt << " .. " << info.end_lt << "\n" << "global_id" << blk.global_id << "\n" << "version" << info.version << "\n" @@ -543,7 +572,8 @@ HttpAnswer& HttpAnswer::operator<<(BlockShardsCell shards_c) { ton::ShardIdFull shard{id.workchain, id.shard}; if (ref.not_null()) { *this << "" << shard.to_str() << "top_block_id()} - << "\">" << ref->top_block_id().id.seqno << "" << ref->created_at() << "" + << "\">" << ref->top_block_id().id.seqno << "created_at()) << "\">" << ref->created_at() << "" << "" << ref->want_split_ << "" << "" << ref->want_merge_ << "" << "" << ref->before_split_ << "" From 6e6081c6573aba798a45733478701860755c502a Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 23 Oct 2023 09:49:02 +0300 Subject: [PATCH 18/71] Add 2023.10 update changelog --- Changelog.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/Changelog.md b/Changelog.md index 1deebc61..fba3c5c5 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,3 +1,9 @@ +## 2023.10 Update +1. A series of additional security checks in node: special cells in action list, init state in external messages, peers data prior to saving to disk. +2. Human-readable timestamps in explorer + +Besides the work of the core team, this update is based on the efforts of @akifoq and @mr-tron. + ## 2023.06 Update 1. (disabled by default) New deflation mechanisms: partial fee burning and blackhole address 2. 
Storage-contract improvement From 77847968fa45cda16f3b3f2501c851d430957b57 Mon Sep 17 00:00:00 2001 From: neodiX42 Date: Mon, 23 Oct 2023 12:31:40 +0200 Subject: [PATCH 19/71] fix openSSL path (#783) --- .github/workflows/tonlib-android-jni.yml | 6 +++--- example/android/build.sh | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/tonlib-android-jni.yml b/.github/workflows/tonlib-android-jni.yml index cdc16841..4a4f57cd 100644 --- a/.github/workflows/tonlib-android-jni.yml +++ b/.github/workflows/tonlib-android-jni.yml @@ -19,9 +19,9 @@ jobs: sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build - name: Configure & Build - run: | - wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip - unzip android-ndk-r25b-linux.zip + run: | + wget -q https://dl.google.com/android/repository/android-ndk-r25b-linux.zip + unzip -q android-ndk-r25b-linux.zip export JAVA_AWT_LIBRARY=NotNeeded export JAVA_JVM_LIBRARY=NotNeeded export JAVA_INCLUDE_PATH=${JAVA_HOME}/include diff --git a/example/android/build.sh b/example/android/build.sh index 61681da2..2a699d33 100755 --- a/example/android/build.sh +++ b/example/android/build.sh @@ -25,7 +25,7 @@ elif [ $ARCH == "arm64" ] then ABI="arm64-v8a" fi - +ORIG_ARCH=$ARCH ARCH=$ABI echo $ABI @@ -33,7 +33,7 @@ mkdir -p build-$ARCH cd build-$ARCH -cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -GNinja -DANDROID_ABI=${ABI} -DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ARCH} -DTON_ARCH="" -DTON_ONLY_TONLIB=ON -DBUILD_SHARED_LIBS=OFF || exit 1 +cmake .. -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake -DCMAKE_BUILD_TYPE=Release -GNinja -DANDROID_ABI=${ABI} -DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ORIG_ARCH} -DTON_ARCH="" -DTON_ONLY_TONLIB=ON -DBUILD_SHARED_LIBS=OFF || exit 1 ninja native-lib || exit 1 popd From a1d2d7cb04e5297f3cdb0dee9bd54987b4582ad3 Mon Sep 17 00:00:00 2001 From: neodiX42 Date: Mon, 23 Oct 2023 13:30:26 +0200 Subject: [PATCH 20/71] upgrade nixpkgs to v22 (#784) --- .github/workflows/ton-x86-64-linux.yml | 8 ++++++-- .github/workflows/ton-x86-64-macos.yml | 8 ++++++-- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ton-x86-64-linux.yml b/.github/workflows/ton-x86-64-linux.yml index 7043d054..059384d7 100644 --- a/.github/workflows/ton-x86-64-linux.yml +++ b/.github/workflows/ton-x86-64-linux.yml @@ -14,14 +14,18 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 - - uses: cachix/install-nix-action@v18 + - uses: cachix/install-nix-action@v22 with: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - name: Compile - run: nix build .?submodules=1#packages.x86_64-linux.ton-oldglibc_staticbinaries --print-build-logs --system x86_64-linux -o result-x86_64 + run: | + git submodule sync --recursive + git submodule update + nix build .?submodules=1#packages.x86_64-linux.ton-oldglibc_staticbinaries --print-build-logs --system x86_64-linux -o result-x86_64 - name: Copy binaries run: | diff --git a/.github/workflows/ton-x86-64-macos.yml b/.github/workflows/ton-x86-64-macos.yml index 8175a366..f0b47365 100644 --- a/.github/workflows/ton-x86-64-macos.yml +++ b/.github/workflows/ton-x86-64-macos.yml @@ -10,14 +10,18 @@ jobs: - uses: actions/checkout@v3 with: submodules: 'recursive' + fetch-depth: 0 - - uses: 
cachix/install-nix-action@v18 + - uses: cachix/install-nix-action@v22 with: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - name: Compile - run: nix build .?submodules=1#packages.x86_64-darwin.ton-staticbin-dylib --print-build-logs -o result-x86_64-darwin + run: | + git submodule sync --recursive + git submodule update + nix build .?submodules=1#packages.x86_64-darwin.ton-staticbin-dylib --print-build-logs -o result-x86_64-darwin - name: Copy binaries run: | From 2bfa6240dd5bf156e4756623c6e079000f3e53b8 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Wed, 25 Oct 2023 12:20:23 +0300 Subject: [PATCH 21/71] Support wallet-v4 in tonlib (#785) --- crypto/CMakeLists.txt | 2 + crypto/smc-envelope/GenericAccount.cpp | 2 +- crypto/smc-envelope/SmartContractCode.cpp | 23 ++++++- crypto/smc-envelope/SmartContractCode.h | 11 +++- crypto/smc-envelope/WalletV4.cpp | 71 +++++++++++++++++++++ crypto/smc-envelope/WalletV4.h | 46 ++++++++++++++ crypto/test/test-smartcont.cpp | 2 + tl/generate/scheme/tonlib_api.tl | 2 + tl/generate/scheme/tonlib_api.tlo | Bin 32948 -> 33224 bytes tonlib/tonlib/TonlibClient.cpp | 73 +++++++++++++++++++++- 10 files changed, 226 insertions(+), 6 deletions(-) create mode 100644 crypto/smc-envelope/WalletV4.cpp create mode 100644 crypto/smc-envelope/WalletV4.h diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index 13e4c5bd..9f7a0a5a 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -236,6 +236,7 @@ set(SMC_ENVELOPE_SOURCE smc-envelope/SmartContractCode.cpp smc-envelope/WalletInterface.cpp smc-envelope/WalletV3.cpp + smc-envelope/WalletV4.cpp smc-envelope/GenericAccount.h smc-envelope/HighloadWallet.h @@ -246,6 +247,7 @@ set(SMC_ENVELOPE_SOURCE smc-envelope/SmartContractCode.h smc-envelope/WalletInterface.h smc-envelope/WalletV3.h + smc-envelope/WalletV4.h ) set(ED25519_TEST_SOURCE diff --git a/crypto/smc-envelope/GenericAccount.cpp b/crypto/smc-envelope/GenericAccount.cpp index 4cd6bf3f..04249699 100644 --- a/crypto/smc-envelope/GenericAccount.cpp +++ b/crypto/smc-envelope/GenericAccount.cpp @@ -155,7 +155,7 @@ td::Result GenericAccount::get_wallet_id(const SmartContract& sc) { return TRY_VM([&]() -> td::Result { auto answer = sc.run_get_method("wallet_id"); if (!answer.success) { - return td::Status::Error("seqno get method failed"); + return td::Status::Error("wallet_id get method failed"); } return static_cast(answer.stack.write().pop_long_range(std::numeric_limits::max())); }()); diff --git a/crypto/smc-envelope/SmartContractCode.cpp b/crypto/smc-envelope/SmartContractCode.cpp index d10c4b5c..585450f6 100644 --- a/crypto/smc-envelope/SmartContractCode.cpp +++ b/crypto/smc-envelope/SmartContractCode.cpp @@ -28,6 +28,7 @@ namespace { // WALLET_REVISION = 2; // WALLET2_REVISION = 2; // WALLET3_REVISION = 2; +// WALLET4_REVISION = 2; // HIGHLOAD_WALLET_REVISION = 2; // HIGHLOAD_WALLET2_REVISION = 2; // DNS_REVISION = 1; @@ -92,6 +93,20 @@ const auto& get_map() { "AAXrc52omhpn5jrhf/AABesePaiaGmPmOuFj8ABDbbYHwR7Z5AOAQm1B1tnkA4BTu1E0IEBQNch0x/" "0BNEC2zz4J28QAoAg9HtvpTGX+gAwoXC2CZEw4g8AOiGOETGA8/gzIG6SMHCU0NcLH+IB3yGSAaGSW3/iAAzTB9QC+wAAHssfFMsfEsv/yx/" "0AMntVA=="); + with_tvm_code( + "wallet-v4-r2", + "te6cckECFAEAAtQAART/APSkE/S88sgLAQIBIAIDAgFIBAUE+PKDCNcYINMf0x/THwL4I7vyZO1E0NMf0x/T//" + "QE0VFDuvKhUVG68qIF+QFUEGT5EPKj+AAkpMjLH1JAyx9SMMv/" + "UhD0AMntVPgPAdMHIcAAn2xRkyDXSpbTB9QC+wDoMOAhwAHjACHAAuMAAcADkTDjDQOkyMsfEssfy/" + "8QERITAubQAdDTAyFxsJJfBOAi10nBIJJfBOAC0x8hghBwbHVnvSKCEGRzdHK9sJJfBeAD+kAwIPpEAcjKB8v/" + 
"ydDtRNCBAUDXIfQEMFyBAQj0Cm+hMbOSXwfgBdM/" + "yCWCEHBsdWe6kjgw4w0DghBkc3RyupJfBuMNBgcCASAICQB4AfoA9AQw+CdvIjBQCqEhvvLgUIIQcGx1Z4MesXCAGFAEywUmzxZY+" + "gIZ9ADLaRfLH1Jgyz8gyYBA+wAGAIpQBIEBCPRZMO1E0IEBQNcgyAHPFvQAye1UAXKwjiOCEGRzdHKDHrFwgBhQBcsFUAPPFiP6AhPLassfyz/" + "JgED7AJJfA+ICASAKCwBZvSQrb2omhAgKBrkPoCGEcNQICEekk30pkQzmkD6f+YN4EoAbeBAUiYcVnzGEAgFYDA0AEbjJftRNDXCx+" + "AA9sp37UTQgQFA1yH0BDACyMoHy//J0AGBAQj0Cm+hMYAIBIA4PABmtznaiaEAga5Drhf/AABmvHfaiaEAQa5DrhY/AAG7SB/" + "oA1NQi+QAFyMoHFcv/ydB3dIAYyMsFywIizxZQBfoCFMtrEszMyXP7AMhAFIEBCPRR8qcCAHCBAQjXGPoA0z/" + "IVCBHgQEI9FHyp4IQbm90ZXB0gBjIywXLAlAGzxZQBPoCFMtqEssfyz/Jc/sAAgBsgQEI1xj6ANM/" + "MFIkgQEI9Fnyp4IQZHN0cnB0gBjIywXLAlAFzxZQA/oCE8tqyx8Syz/Jc/sAAAr0AMntVGliJeU="); return map; }(); return map; @@ -137,9 +152,12 @@ td::Span SmartContractCode::get_revisions(Type type) { static int res[] = {1}; return res; } + case Type::WalletV4: { + static int res[] = {2}; + return res; + } } UNREACHABLE(); - return {}; } td::Result SmartContractCode::validate_revision(Type type, int revision) { @@ -179,9 +197,10 @@ td::Ref SmartContractCode::get_code(Type type, int ext_revision) { return "payment-channel"; case Type::RestrictedWallet: return "restricted-wallet3"; + case Type::WalletV4: + return "wallet-v4"; } UNREACHABLE(); - return ""; }(type); if (revision == -1) { return load(basename).move_as_ok(); diff --git a/crypto/smc-envelope/SmartContractCode.h b/crypto/smc-envelope/SmartContractCode.h index 85be3531..be50d2a1 100644 --- a/crypto/smc-envelope/SmartContractCode.h +++ b/crypto/smc-envelope/SmartContractCode.h @@ -26,7 +26,16 @@ class SmartContractCode { public: static td::Result> load(td::Slice name); - enum Type { WalletV3 = 4, HighloadWalletV1, HighloadWalletV2, ManualDns, Multisig, PaymentChannel, RestrictedWallet }; + enum Type { + WalletV3 = 4, + HighloadWalletV1, + HighloadWalletV2, + ManualDns, + Multisig, + PaymentChannel, + RestrictedWallet, + WalletV4 + }; static td::Span get_revisions(Type type); static td::Result validate_revision(Type type, int revision); static td::Ref get_code(Type type, int revision = 0); diff --git a/crypto/smc-envelope/WalletV4.cpp b/crypto/smc-envelope/WalletV4.cpp new file mode 100644 index 00000000..738fa9c7 --- /dev/null +++ b/crypto/smc-envelope/WalletV4.cpp @@ -0,0 +1,71 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#include "WalletV4.h" +#include "GenericAccount.h" +#include "SmartContractCode.h" + +#include "vm/boc.h" +#include "vm/cells/CellString.h" +#include "td/utils/base64.h" + +#include + +namespace ton { +td::Result> WalletV4::make_a_gift_message(const td::Ed25519::PrivateKey& private_key, + td::uint32 valid_until, td::Span gifts) const { + CHECK(gifts.size() <= get_max_gifts_size()); + TRY_RESULT(seqno, get_seqno()); + TRY_RESULT(wallet_id, get_wallet_id()); + vm::CellBuilder cb; + cb.store_long(wallet_id, 32).store_long(valid_until, 32).store_long(seqno, 32); + cb.store_long(0, 8); // The only difference with wallet-v3 + + for (auto& gift : gifts) { + td::int32 send_mode = 3; + if (gift.gramms == -1) { + send_mode += 128; + } + if (gift.send_mode > -1) { + send_mode = gift.send_mode; + } + cb.store_long(send_mode, 8).store_ref(create_int_message(gift)); + } + + auto message_outer = cb.finalize(); + auto signature = private_key.sign(message_outer->get_hash().as_slice()).move_as_ok(); + return vm::CellBuilder().store_bytes(signature).append_cellslice(vm::load_cell_slice(message_outer)).finalize(); +} + +td::Ref WalletV4::get_init_data(const InitData& init_data) noexcept { + return vm::CellBuilder() + .store_long(init_data.seqno, 32) + .store_long(init_data.wallet_id, 32) + .store_bytes(init_data.public_key) + .store_zeroes(1) // plugins dict + .finalize(); +} + +td::Result WalletV4::get_wallet_id() const { + return TRY_VM([&]() -> td::Result { + auto answer = run_get_method("get_subwallet_id"); + if (!answer.success) { + return td::Status::Error("get_subwallet_id get method failed"); + } + return static_cast(answer.stack.write().pop_long_range(std::numeric_limits::max())); + }()); +} +} // namespace ton diff --git a/crypto/smc-envelope/WalletV4.h b/crypto/smc-envelope/WalletV4.h new file mode 100644 index 00000000..721e8103 --- /dev/null +++ b/crypto/smc-envelope/WalletV4.h @@ -0,0 +1,46 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#pragma once + +#include "smc-envelope/SmartContract.h" +#include "smc-envelope/WalletInterface.h" +#include "vm/cells.h" +#include "Ed25519.h" +#include "block/block.h" +#include "vm/cells/CellString.h" + +namespace ton { + +struct WalletV4Traits { + using InitData = WalletInterface::DefaultInitData; + + static constexpr unsigned max_message_size = vm::CellString::max_bytes; + static constexpr unsigned max_gifts_size = 4; + static constexpr auto code_type = SmartContractCode::WalletV4; +}; + +class WalletV4 : public WalletBase { + public: + explicit WalletV4(State state) : WalletBase(std::move(state)) { + } + td::Result> make_a_gift_message(const td::Ed25519::PrivateKey& private_key, td::uint32 valid_until, + td::Span gifts) const override; + static td::Ref get_init_data(const InitData& init_data) noexcept; + + td::Result get_wallet_id() const override; +}; +} // namespace ton \ No newline at end of file diff --git a/crypto/test/test-smartcont.cpp b/crypto/test/test-smartcont.cpp index 673bb758..d3f18813 100644 --- a/crypto/test/test-smartcont.cpp +++ b/crypto/test/test-smartcont.cpp @@ -35,6 +35,7 @@ #include "smc-envelope/SmartContract.h" #include "smc-envelope/SmartContractCode.h" #include "smc-envelope/WalletV3.h" +#include "smc-envelope/WalletV4.h" #include "smc-envelope/HighloadWallet.h" #include "smc-envelope/HighloadWalletV2.h" #include "smc-envelope/PaymentChannel.h" @@ -526,6 +527,7 @@ void do_test_wallet() { TEST(Tonlib, Wallet) { do_test_wallet(); + do_test_wallet(); do_test_wallet(); do_test_wallet(); do_test_wallet(); diff --git a/tl/generate/scheme/tonlib_api.tl b/tl/generate/scheme/tonlib_api.tl index af08cbd7..a6172376 100644 --- a/tl/generate/scheme/tonlib_api.tl +++ b/tl/generate/scheme/tonlib_api.tl @@ -61,6 +61,7 @@ pchan.config alice_public_key:string alice_address:accountAddress bob_public_key raw.initialAccountState code:bytes data:bytes = InitialAccountState; wallet.v3.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; +wallet.v4.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; wallet.highload.v1.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; wallet.highload.v2.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; @@ -73,6 +74,7 @@ pchan.initialAccountState config:pchan.config = InitialAccountState; raw.accountState code:bytes data:bytes frozen_hash:bytes = AccountState; wallet.v3.accountState wallet_id:int64 seqno:int32 = AccountState; +wallet.v4.accountState wallet_id:int64 seqno:int32 = AccountState; wallet.highload.v1.accountState wallet_id:int64 seqno:int32 = AccountState; wallet.highload.v2.accountState wallet_id:int64 = AccountState; dns.accountState wallet_id:int64 = AccountState; diff --git a/tl/generate/scheme/tonlib_api.tlo b/tl/generate/scheme/tonlib_api.tlo index 6644d118ca6b8efa6a9ae2fd60439bea157bc17a..7657852eabd565bb5432eb13eeae0c48166cd309 100644 GIT binary patch delta 512 zcmdne$aJEaX~GWC@Xfm~@Hi$X=a=S{1eYY1q%trta8CRvzVXQ#CP}B6Ck_aE=4F;- zCgvck;Q%Vwe1Q26595T*dj;YdSwN;tRuC$IFlGp8Ko|kSAqF5RpqXHBRgNpBY`WeR zvGT;6oYWG%G84T-WUIl(05w5$%Lp65je#&$z(no{n?OWFL{uOQxJZcz)ba%)3m{T1 zqEIzCqEH7u5rwJ|5QCVr*+Z;{1ETtd+yoYozbEI&2S6AHWIkQCH63?T0u zJ@oQ|EW$gPn1RK>05%S&8DdI+f(6_-2;&8YNQ0sdL~4U#1B79sq=BYuj}lbP8zqSQ oHY+HFZX0t+43JXMajJ(d|0#TODJJLK@SwSvin7lU4e6vkXk0=0WdV=5p diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 74b427db..521f961a 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ 
-31,6 +31,7 @@ #include "smc-envelope/GenericAccount.h" #include "smc-envelope/ManualDns.h" #include "smc-envelope/WalletV3.h" +#include "smc-envelope/WalletV4.h" #include "smc-envelope/HighloadWallet.h" #include "smc-envelope/HighloadWalletV2.h" #include "smc-envelope/PaymentChannel.h" @@ -228,6 +229,14 @@ td::Result to_init_data(const tonlib_api::wallet_v3_ini return std::move(init_data); } +td::Result to_init_data(const tonlib_api::wallet_v4_initialAccountState& wallet_state) { + TRY_RESULT(key_bytes, get_public_key(wallet_state.public_key_)); + ton::WalletV4::InitData init_data; + init_data.public_key = td::SecureString(key_bytes.key); + init_data.wallet_id = static_cast(wallet_state.wallet_id_); + return std::move(init_data); +} + td::Result to_init_data(const tonlib_api::rwallet_initialAccountState& rwallet_state) { TRY_RESULT(init_key_bytes, get_public_key(rwallet_state.init_public_key_)); TRY_RESULT(key_bytes, get_public_key(rwallet_state.public_key_)); @@ -319,6 +328,16 @@ class AccountState { return tonlib_api::make_object(static_cast(wallet_id), static_cast(seqno)); } + td::Result> to_wallet_v4_accountState() const { + if (wallet_type_ != WalletV4) { + return TonlibError::AccountTypeUnexpected("WalletV4"); + } + auto wallet = ton::WalletV4(get_smc_state()); + TRY_RESULT(seqno, wallet.get_seqno()); + TRY_RESULT(wallet_id, wallet.get_wallet_id()); + return tonlib_api::make_object(static_cast(wallet_id), + static_cast(seqno)); + } td::Result> to_wallet_highload_v1_accountState() const { if (wallet_type_ != HighloadWalletV1) { @@ -420,6 +439,8 @@ class AccountState { return f(to_dns_accountState()); case PaymentChannel: return f(to_payment_channel_accountState()); + case WalletV4: + return f(to_wallet_v4_accountState()); } UNREACHABLE(); } @@ -458,7 +479,8 @@ class AccountState { HighloadWalletV2, ManualDns, PaymentChannel, - RestrictedWallet + RestrictedWallet, + WalletV4 }; WalletType get_wallet_type() const { return wallet_type_; @@ -477,6 +499,7 @@ class AccountState { case AccountState::HighloadWalletV1: case AccountState::HighloadWalletV2: case AccountState::RestrictedWallet: + case AccountState::WalletV4: return true; } UNREACHABLE(); @@ -497,6 +520,8 @@ class AccountState { return td::make_unique(get_smc_state()); case AccountState::RestrictedWallet: return td::make_unique(get_smc_state()); + case AccountState::WalletV4: + return td::make_unique(get_smc_state()); } UNREACHABLE(); return {}; @@ -554,6 +579,23 @@ class AccountState { break; } }, + [&](tonlib_api::wallet_v4_initialAccountState& v4wallet) { + for (auto revision : ton::SmartContractCode::get_revisions(ton::SmartContractCode::WalletV4)) { + auto init_data = to_init_data(v4wallet); + if (init_data.is_error()) { + continue; + } + auto wallet = ton::WalletV4::create(init_data.move_as_ok(), revision); + if (!(wallet->get_address(ton::masterchainId) == address_ || + wallet->get_address(ton::basechainId) == address_)) { + continue; + } + wallet_type_ = WalletType::WalletV4; + wallet_revision_ = revision; + set_new_state(wallet->get_state()); + break; + } + }, [&](tonlib_api::rwallet_initialAccountState& rwallet) { for (auto revision : ton::SmartContractCode::get_revisions(ton::SmartContractCode::RestrictedWallet)) { auto r_init_data = to_init_data(rwallet); @@ -597,7 +639,7 @@ class AccountState { return wallet_type_; } auto wallet_id = static_cast(address_.workchain + wallet_id_); - ton::WalletV3::InitData init_data{key.as_octet_string(), wallet_id}; + ton::WalletInterface::DefaultInitData 
init_data{key.as_octet_string(), wallet_id}; auto o_revision = ton::WalletV3::guess_revision(address_, init_data); if (o_revision) { wallet_type_ = WalletType::WalletV3; @@ -605,6 +647,13 @@ class AccountState { set_new_state(ton::WalletV3::get_init_state(wallet_revision_, init_data)); return wallet_type_; } + o_revision = ton::WalletV4::guess_revision(address_, init_data); + if (o_revision) { + wallet_type_ = WalletType::WalletV4; + wallet_revision_ = o_revision.value(); + set_new_state(ton::WalletV4::get_init_state(wallet_revision_, init_data)); + return wallet_type_; + } o_revision = ton::HighloadWalletV2::guess_revision(address_, init_data); if (o_revision) { wallet_type_ = WalletType::HighloadWalletV2; @@ -682,6 +731,12 @@ class AccountState { wallet_revision_ = o_revision.value(); return wallet_type_; } + o_revision = ton::WalletV4::guess_revision(code_hash); + if (o_revision) { + wallet_type_ = WalletType::WalletV4; + wallet_revision_ = o_revision.value(); + return wallet_type_; + } o_revision = ton::HighloadWalletV2::guess_revision(code_hash); if (o_revision) { wallet_type_ = WalletType::HighloadWalletV2; @@ -2256,6 +2311,13 @@ td::Result get_account_address(const tonlib_api::wallet_v3_in ->get_address(workchain_id); } +td::Result get_account_address(const tonlib_api::wallet_v4_initialAccountState& test_wallet_state, + td::int32 revision, ton::WorkchainId workchain_id) { + TRY_RESULT(key_bytes, get_public_key(test_wallet_state.public_key_)); + return ton::WalletV4::create({key_bytes.key, static_cast(test_wallet_state.wallet_id_)}, revision) + ->get_address(workchain_id); +} + td::Result get_account_address( const tonlib_api::wallet_highload_v1_initialAccountState& test_wallet_state, td::int32 revision, ton::WorkchainId workchain_id) { @@ -2303,6 +2365,7 @@ static td::optional get_wallet_type(tonlib_api::In td::overloaded( [](const tonlib_api::raw_initialAccountState&) { return td::optional(); }, [](const tonlib_api::wallet_v3_initialAccountState&) { return ton::SmartContractCode::WalletV3; }, + [](const tonlib_api::wallet_v4_initialAccountState&) { return ton::SmartContractCode::WalletV4; }, [](const tonlib_api::wallet_highload_v1_initialAccountState&) { return ton::SmartContractCode::HighloadWalletV1; }, @@ -2392,6 +2455,12 @@ td::Status TonlibClient::do_request(tonlib_api::guessAccount& request, sources.push_back(Source{tonlib_api::make_object( request.public_key_, wallet_id_ + ton::basechainId), ton::basechainId}); + sources.push_back(Source{tonlib_api::make_object( + request.public_key_, wallet_id_ + ton::masterchainId), + ton::masterchainId}); + sources.push_back(Source{tonlib_api::make_object( + request.public_key_, wallet_id_ + ton::basechainId), + ton::basechainId}); for (Source& source : sources) { auto o_type = get_wallet_type(*source.init_state); if (!o_type) { From 6a0d14f8edce6cd631f353de0b6389f128dcea8b Mon Sep 17 00:00:00 2001 From: Marat <98183742+dungeon-master-666@users.noreply.github.com> Date: Thu, 26 Oct 2023 14:25:37 +0200 Subject: [PATCH 22/71] [tonlib] Handle special cell in msg body (#789) --- tonlib/tonlib/TonlibClient.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 521f961a..0eabe142 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -2974,7 +2974,7 @@ struct ToRawTransactions { if (type == 0 || type == 0x2167da4b) { td::Status status; - auto r_body_message = vm::CellString::load(body.write()); + auto r_body_message = 
TRY_VM(vm::CellString::load(body.write())); LOG_IF(WARNING, r_body_message.is_error()) << "Failed to parse a message: " << r_body_message.error(); if (r_body_message.is_ok()) { From 06e22bdb2e1750f1b98aa9cab1b73e6604997cdd Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Sat, 28 Oct 2023 19:05:00 +0300 Subject: [PATCH 23/71] Add "showtransactions" to tonlib-cli (#790) Co-authored-by: SpyCheese --- tonlib/tonlib/tonlib-cli.cpp | 43 ++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/tonlib/tonlib/tonlib-cli.cpp b/tonlib/tonlib/tonlib-cli.cpp index fee07496..1107a5d0 100644 --- a/tonlib/tonlib/tonlib-cli.cpp +++ b/tonlib/tonlib/tonlib-cli.cpp @@ -423,6 +423,8 @@ class TonlibCli : public td::actor::Actor { td::TerminalIO::out() << "exportkeypem [] - export key\n"; td::TerminalIO::out() << "gethistory - get history fo simple wallet with requested key (last 10 transactions)\n"; + td::TerminalIO::out() << "showtransactions [] - show transaction on account " + "with given and (in base64) and previous transactions (up to ).\n"; td::TerminalIO::out() << "init - init simple wallet with requested key\n"; td::TerminalIO::out() << "transfer[f][F][e][k][c] ( |) - " "make transfer from \n" @@ -520,6 +522,8 @@ class TonlibCli : public td::actor::Actor { } else if (cmd == "getmasterchainsignatures") { auto seqno = parser.read_word(); run_get_masterchain_block_signatures(seqno, std::move(cmd_promise)); + } else if (cmd == "showtransactions") { + run_show_transactions(parser, std::move(cmd_promise)); } else { cmd_promise.set_error(td::Status::Error(PSLICE() << "Unkwnown query `" << cmd << "`")); } @@ -2162,6 +2166,45 @@ class TonlibCli : public td::actor::Actor { })); } + void run_show_transactions(td::ConstParser& parser, td::Promise promise) { + TRY_RESULT_PROMISE(promise, address, to_account_address(parser.read_word(), false)); + TRY_RESULT_PROMISE(promise, lt, td::to_integer_safe(parser.read_word())); + TRY_RESULT_PROMISE(promise, hash, td::base64_decode(parser.read_word())); + int count = 1; + if (!parser.empty()) { + TRY_RESULT_PROMISE_ASSIGN(promise, count, td::to_integer_safe(parser.read_word())); + } + auto id = make_object(lt, hash); + send_query(make_object( + nullptr, ton::move_tl_object_as(std::move(address.address)), + std::move(id), count, false), + promise.wrap([](ton::tl_object_ptr&& result) -> td::Result { + td::TerminalIO::out() << "Found " << result->transactions_.size() << " transactions\n"; + for (size_t i = 0; i < result->transactions_.size(); ++i) { + td::TerminalIO::out() << "Transaction #" << i << "\n"; + auto& tr = result->transactions_[i]; + TRY_RESULT(root, vm::std_boc_deserialize(tr->data_)); + block::gen::Transaction::Record trans; + if (!tlb::unpack_cell(root, trans)) { + return td::Status::Error("cannot unpack transaction"); + } + td::TerminalIO::out() << "Transaction Account: " << tr->address_->account_address_ << "\n"; + td::TerminalIO::out() << "Transaction LT: " << tr->transaction_id_->lt_ << "\n"; + td::TerminalIO::out() << "Transaction Hash: " << td::base64_encode(tr->transaction_id_->hash_) + << "\n"; + td::TerminalIO::out() << "Transaction Timestamp: " << tr->utime_ << "\n"; + td::TerminalIO::out() << "Transaction Out messages: " << tr->out_msgs_.size() << "\n"; + td::TerminalIO::out() << "Previous transaction LT: " << trans.prev_trans_lt << "\n"; + td::TerminalIO::out() << "Previous transaction Hash: " + << td::base64_encode(trans.prev_trans_hash.as_slice()) << "\n"; + std::ostringstream ss; + block::gen::t_Transaction.print_ref(2048, 
ss, root); + td::TerminalIO::out() << "Transaction dump: " << ss.str() << "\n"; + } + return td::Unit(); + })); + } + void get_history2(td::Slice key, td::Result> r_state, td::Promise promise) { TRY_RESULT_PROMISE(promise, state, std::move(r_state)); From 89700cb2aa96cf5561f5aa028e46f928cf90ec38 Mon Sep 17 00:00:00 2001 From: neodiX42 Date: Mon, 30 Oct 2023 09:52:00 +0100 Subject: [PATCH 24/71] CI: fix macOS and tonlib Android GitHub actions in testnet (#786) * fix macOS github actions * fix android tonlib GH action; * fixing wasm GH action * strip binaries * fix randomly failing ubuntu and wasm GH actions * fix randomly failing ubuntu and wasm GH actions * revert some changes --- .github/script/fift-func-wasm-build-ubuntu.sh | 33 ++++++++++++++++--- .github/workflows/macos-11.7-compile.yml | 27 +++++++++++++-- .github/workflows/macos-12.6-compile.yml | 27 ++++++++++++--- .github/workflows/ton-wasm-emscripten.yml | 8 ++++- .github/workflows/tonlib-android-jni.yml | 6 ++-- .github/workflows/ubuntu-22.04-compile.yml | 8 +++-- .github/workflows/ubuntu-compile.yml | 6 +++- .github/workflows/win-2019-compile.yml | 2 +- example/android/build.sh | 4 +-- 9 files changed, 99 insertions(+), 22 deletions(-) diff --git a/.github/script/fift-func-wasm-build-ubuntu.sh b/.github/script/fift-func-wasm-build-ubuntu.sh index 95de71f9..259377cc 100755 --- a/.github/script/fift-func-wasm-build-ubuntu.sh +++ b/.github/script/fift-func-wasm-build-ubuntu.sh @@ -3,8 +3,8 @@ # dependencies: #sudo apt-get install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libevent-dev -export CC=$(which clang) -export CXX=$(which clang++) +export CC=$(which clang-16) +export CXX=$(which clang++-16) export CCACHE_DISABLE=1 cd ../.. @@ -37,7 +37,15 @@ cd .. mkdir build cd build -cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DZLIB_LIBRARY=/usr/lib/x86_64-linux-gnu/libz.so -DZLIB_INCLUDE_DIR=$ZLIB_DIR -DOPENSSL_ROOT_DIR=$OPENSSL_DIR -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so -DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.so -DTON_USE_ABSEIL=OFF .. +cmake -GNinja -DCMAKE_BUILD_TYPE=Release \ +-DCMAKE_CXX_STANDARD=17 \ +-DZLIB_LIBRARY=/usr/lib/x86_64-linux-gnu/libz.so \ +-DZLIB_INCLUDE_DIR=$ZLIB_DIR \ +-DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ +-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ +-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \ +-DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.so \ +-DTON_USE_ABSEIL=OFF .. test $? -eq 0 || { echo "Can't configure TON build"; exit 1; } @@ -92,8 +100,23 @@ test $? -eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; } cd ../build -emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release -DZLIB_LIBRARY=$ZLIB_DIR/libz.a -DZLIB_INCLUDE_DIR=$ZLIB_DIR -DOPENSSL_ROOT_DIR=$OPENSSL_DIR -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a -DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a -DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake -DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" -DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include -DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a -DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include -DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a -DSODIUM_LIBRARY_DEBUG=$SODIUM_DIR/src/libsodium/.libs/libsodium.a -DSODIUM_USE_STATIC_LIBS=ON .. -test $? 
-eq 0 || { echo "Can't configure TON with with emmake "; exit 1; } +emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \ +-DZLIB_LIBRARY=$ZLIB_DIR/libz.a \ +-DZLIB_INCLUDE_DIR=$ZLIB_DIR \ +-DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ +-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ +-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a \ +-DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a \ +-DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake \ +-DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" \ +-DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include \ +-DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a \ +-DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include \ +-DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \ +-DSODIUM_LIBRARY_DEBUG=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \ +-DSODIUM_USE_STATIC_LIBS=ON .. + +test $? -eq 0 || { echo "Can't configure TON with emmake "; exit 1; } cp -R ../crypto/smartcont ../crypto/fift/lib crypto emmake make -j16 funcfiftlib func fift tlbc emulator-emscripten diff --git a/.github/workflows/macos-11.7-compile.yml b/.github/workflows/macos-11.7-compile.yml index 910b16fc..100d51c8 100644 --- a/.github/workflows/macos-11.7-compile.yml +++ b/.github/workflows/macos-11.7-compile.yml @@ -14,22 +14,43 @@ jobs: submodules: 'recursive' - name: Compile OpenSSL run: | + export NONINTERACTIVE=1 + brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool git clone https://github.com/openssl/openssl openssl_1_1_1 cd openssl_1_1_1 git checkout OpenSSL_1_1_1-stable - ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=11.7 + ./Configure darwin64-x86_64-cc -static -mmacosx-version-min=11.7 make build_libs -j4 - name: Build all run: | - export NONINTERACTIVE=1 - brew install ninja secp256k1 libsodium libmicrohttpd pkg-config + brew unlink openssl@3 rootPath=`pwd` mkdir build cd build cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + - name: Strip binaries + run: | + strip build/storage/storage-daemon/storage-daemon + strip build/storage/storage-daemon/storage-daemon-cli + strip build/blockchain-explorer/blockchain-explorer + strip build/crypto/fift + strip build/crypto/func + strip build/crypto/create-state + strip build/crypto/tlbc + strip build/validator-engine-console/validator-engine-console + strip build/tonlib/tonlib-cli + strip build/http/http-proxy + strip build/rldp-http-proxy/rldp-http-proxy + strip build/dht-server/dht-server + strip build/lite-client/lite-client + strip build/validator-engine/validator-engine + strip build/utils/generate-random-id + strip build/utils/json2tlo + strip build/adnl/adnl-proxy + - name: Find & copy binaries run: | mkdir artifacts diff --git a/.github/workflows/macos-12.6-compile.yml b/.github/workflows/macos-12.6-compile.yml index 368abf02..8660a10b 100644 --- a/.github/workflows/macos-12.6-compile.yml +++ b/.github/workflows/macos-12.6-compile.yml @@ -15,11 +15,11 @@ jobs: - name: Compile OpenSSL run: | export NONINTERACTIVE=1 - brew install ninja libsodium automake + brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool git clone https://github.com/openssl/openssl openssl_1_1_1 cd openssl_1_1_1 git checkout OpenSSL_1_1_1-stable - ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=12.6 + ./Configure darwin64-x86_64-cc -static -mmacosx-version-min=12.6 make build_libs -j4 - name: Compile Secp256k1 @@ -33,14 +33,33 @@ jobs: - name: Build all run: | - export NONINTERACTIVE=1 - brew install ninja libmicrohttpd pkg-config + brew unlink openssl@3 rootPath=`pwd` mkdir build cd build cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + - name: Strip binaries + run: | + strip build/storage/storage-daemon/storage-daemon + strip build/storage/storage-daemon/storage-daemon-cli + strip build/blockchain-explorer/blockchain-explorer + strip build/crypto/fift + strip build/crypto/func + strip build/crypto/create-state + strip build/crypto/tlbc + strip build/validator-engine-console/validator-engine-console + strip build/tonlib/tonlib-cli + strip build/http/http-proxy + strip build/rldp-http-proxy/rldp-http-proxy + strip build/dht-server/dht-server + strip build/lite-client/lite-client + strip build/validator-engine/validator-engine + strip build/utils/generate-random-id + strip build/utils/json2tlo + strip build/adnl/adnl-proxy + - name: Find & copy binaries run: | mkdir artifacts diff --git a/.github/workflows/ton-wasm-emscripten.yml b/.github/workflows/ton-wasm-emscripten.yml index 40a65818..a3167800 100644 --- a/.github/workflows/ton-wasm-emscripten.yml +++ b/.github/workflows/ton-wasm-emscripten.yml @@ -15,7 +15,13 @@ jobs: - name: Install libraries run: | sudo apt update - sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libsecp256k1-dev libsodium-dev + sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libsecp256k1-dev libsodium-dev automake libtool + + - name: Setup compiler + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all - name: Configure & Build run: | diff --git a/.github/workflows/tonlib-android-jni.yml b/.github/workflows/tonlib-android-jni.yml index cdf410b4..6e04f8b7 100644 --- a/.github/workflows/tonlib-android-jni.yml +++ b/.github/workflows/tonlib-android-jni.yml @@ -19,9 +19,9 @@ jobs: sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build - name: Configure & Build - run: | - wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip - unzip android-ndk-r25b-linux.zip + run: | + wget -q https://dl.google.com/android/repository/android-ndk-r25b-linux.zip + unzip -q android-ndk-r25b-linux.zip export JAVA_AWT_LIBRARY=NotNeeded export JAVA_JVM_LIBRARY=NotNeeded export JAVA_INCLUDE_PATH=${JAVA_HOME}/include diff --git a/.github/workflows/ubuntu-22.04-compile.yml b/.github/workflows/ubuntu-22.04-compile.yml index 397116ff..b5d4d571 100644 --- a/.github/workflows/ubuntu-22.04-compile.yml +++ b/.github/workflows/ubuntu-22.04-compile.yml @@ -39,13 +39,17 @@ jobs: mkdir build cd build - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DCMAKE_CXX_FLAGS="-mavx2" .. + cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. 
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state emulator + - name: Strip binaries + run: | + strip -g build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* + - name: Find & copy binaries run: | mkdir artifacts - cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* artifacts + cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* artifacts chmod +x artifacts/* cp -R crypto/smartcont artifacts/ cp -R crypto/fift/lib artifacts/ diff --git a/.github/workflows/ubuntu-compile.yml b/.github/workflows/ubuntu-compile.yml index 171ba46d..be3878f5 100644 --- a/.github/workflows/ubuntu-compile.yml +++ b/.github/workflows/ubuntu-compile.yml @@ -43,9 +43,13 @@ jobs: cd .. buildPath=`pwd` - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$buildPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$buildPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DCMAKE_CXX_FLAGS="-mavx2" .. + cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$buildPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$buildPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. 
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator + - name: Strip binaries + run: | + strip -g build-${{ matrix.os }}/storage/storage-daemon/storage-daemon build-${{ matrix.os }}/storage/storage-daemon/storage-daemon-cli build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy build-${{ matrix.os }}/emulator/libemulator.* + - name: Find & copy binaries run: | mkdir artifacts-${{ matrix.os }} diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml index 1edcb0e1..9b4d3bef 100644 --- a/.github/workflows/win-2019-compile.yml +++ b/.github/workflows/win-2019-compile.yml @@ -92,7 +92,7 @@ jobs: mkdir artifacts\smartcont mkdir artifacts\lib - for %%I in (build\storage\storage-daemon\Release\storage-daemon.exe build\storage\storage-daemon\Release\storage-daemon-cli.exe build\blockchain-explorer\blockchain-explorer.exe build\crypto\Release\fift.exe build\crypto\Release\tlbc.exe build\crypto\Release\func.exe build\crypto\Release\create-state.exe build\validator-engine-console\Release\validator-engine-console.exe build\tonlib\Release\tonlib-cli.exe build\tonlib\Release\tonlibjson.dll build\http\Release\http-proxy.exe build\rldp-http-proxy\Release\rldp-http-proxy.exe build\dht-server\Release\dht-server.exe build\lite-client\Release\lite-client.exe build\validator-engine\Release\validator-engine.exe build\utils\Release\generate-random-id.exe build\utils\Release\json2tlo.exe build\adnl\Release\adnl-proxy.exe build\emulator\Release\emulator.dll) do copy %%I artifacts\ + for %%I in (build\storage\storage-daemon\Release\storage-daemon.exe build\storage\storage-daemon\Release\storage-daemon-cli.exe build\blockchain-explorer\blockchain-explorer.exe build\crypto\Release\fift.exe build\crypto\Release\tlbc.exe build\crypto\Release\func.exe build\crypto\Release\create-state.exe build\validator-engine-console\Release\validator-engine-console.exe build\tonlib\Release\tonlib-cli.exe build\tonlib\Release\tonlibjson.dll build\http\Release\http-proxy.exe build\rldp-http-proxy\Release\rldp-http-proxy.exe build\dht-server\Release\dht-server.exe build\lite-client\Release\lite-client.exe build\validator-engine\Release\validator-engine.exe build\utils\Release\generate-random-id.exe build\utils\Release\json2tlo.exe build\adnl\Release\adnl-proxy.exe build\emulator\Release\emulator.dll) do (strip -g %%I & copy %%I artifacts\) xcopy /e /k /h /i crypto\smartcont artifacts\smartcont xcopy /e /k /h /i crypto\fift\lib artifacts\lib diff --git a/example/android/build.sh b/example/android/build.sh index 8cecfa41..7f170dbc 100755 --- a/example/android/build.sh +++ b/example/android/build.sh @@ -36,7 +36,7 @@ then BLST_LIBRARY=$(pwd)/third_party/blst/armv8/libblst.a fi - 
+ORIG_ARCH=$ARCH ARCH=$ABI mkdir -p build-$ARCH @@ -46,7 +46,7 @@ cmake .. -GNinja -DPORTABLE=1 \ -DANDROID_ABI=x86 -DANDROID_PLATFORM=android-32 -DANDROID_NDK=${ANDROID_NDK_ROOT} \ -DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ -DCMAKE_BUILD_TYPE=Release -DANDROID_ABI=${ABI} \ --DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ARCH} -DTON_ARCH="" \ +-DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ORIG_ARCH} -DTON_ARCH="" \ -DTON_ONLY_TONLIB=ON \ -DSECP256K1_INCLUDE_DIR=${SECP256K1_INCLUDE_DIR} -DSECP256K1_LIBRARY=${SECP256K1_LIBRARY} \ -DSODIUM_INCLUDE_DIR=${SODIUM_INCLUDE_DIR} -DSODIUM_LIBRARY_RELEASE=${SODIUM_LIBRARY_RELEASE} \ From 5847897b3758bc9ea85af38e7be8fc867e4c133a Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Fri, 3 Nov 2023 14:43:34 +0300 Subject: [PATCH 25/71] Various changes in TVM, github builds and tests (#793) * Bugfixes in TVM and node * Upgrade to C++17 * Improve GitHub builds * Fix existing tests and partially integrate them into builds --------- Co-authored-by: neodiX42 Co-authored-by: EmelyanenkoK --- .clang_complete | 2 +- .github/script/fift-func-wasm-build-ubuntu.sh | 2 +- .github/workflows/macos-11.7-compile.yml | 32 +- .github/workflows/macos-12.6-compile.yml | 29 +- .github/workflows/ton-x86-64-linux.yml | 2 +- .github/workflows/ton-x86-64-macos.yml | 2 +- .github/workflows/ubuntu-22.04-compile.yml | 20 +- .github/workflows/ubuntu-compile.yml | 21 +- .github/workflows/win-2019-compile.yml | 15 +- CMakeLists.txt | 67 +- README.md | 5 + adnl/CMakeLists.txt | 2 +- blockchain-explorer/CMakeLists.txt | 2 +- blockchain-explorer/blockchain-explorer.cpp | 4 +- catchain/CMakeLists.txt | 2 +- common/CMakeLists.txt | 2 +- create-hardfork/CMakeLists.txt | 2 +- create-hardfork/create-hardfork.cpp | 4 +- crypto/CMakeLists.txt | 2 +- crypto/block/transaction.cpp | 663 +++++- crypto/block/transaction.h | 4 +- crypto/common/bitstring.cpp | 4 +- crypto/fift/lib/Asm.fif | 94 +- crypto/fift/words.cpp | 2 +- crypto/func/auto-tests/legacy_tester.py | 118 +- crypto/func/auto-tests/run_tests.py | 36 +- crypto/openssl/bignum.cpp | 12 +- crypto/openssl/bignum.h | 2 +- crypto/parser/lexer.cpp | 1 - crypto/smc-envelope/ManualDns.h | 4 +- crypto/smc-envelope/SmartContract.cpp | 2 +- crypto/test/fift.cpp | 2 +- crypto/test/fift/rist255.fif | 3 + crypto/test/modbigint.cpp | 6 +- crypto/test/test-bigint.cpp | 6 +- crypto/test/test-db.cpp | 8 +- crypto/test/test-smartcont.cpp | 13 +- crypto/test/vm.cpp | 2 +- crypto/tl/tlbc-gen-cpp.cpp | 1 - crypto/tl/tlbc.cpp | 2 - crypto/vm/arithops.cpp | 44 +- crypto/vm/tonops.cpp | 37 +- crypto/vm/vm.cpp | 13 + crypto/vm/vm.h | 2 + dht-server/CMakeLists.txt | 2 +- dht/CMakeLists.txt | 2 +- emulator/CMakeLists.txt | 2 +- emulator/emulator-extern.cpp | 4 +- emulator/transaction-emulator.cpp | 4 +- fec/CMakeLists.txt | 2 +- fec/fec.cpp | 2 +- flake.lock | 6 +- flake.nix | 8 +- http/CMakeLists.txt | 2 +- keyring/CMakeLists.txt | 2 +- keys/CMakeLists.txt | 2 +- lite-client/CMakeLists.txt | 2 +- lite-client/lite-client.cpp | 2 +- memprof/CMakeLists.txt | 2 +- overlay/CMakeLists.txt | 2 +- rldp-http-proxy/CMakeLists.txt | 2 +- rldp/CMakeLists.txt | 2 +- rldp2/CMakeLists.txt | 2 +- storage/CMakeLists.txt | 2 +- storage/storage-daemon/CMakeLists.txt | 2 +- storage/test/storage.cpp | 3 - tdactor/CMakeLists.txt | 2 +- tdactor/benchmark/CMakeLists.txt | 2 +- tdactor/test/actors_core.cpp | 4 +- tdactor/test/actors_promise.cpp | 2 +- tddb/CMakeLists.txt | 2 +- tddb/td/db/utils/BlobView.cpp | 1 + tdfec/CMakeLists.txt | 2 +- tdfec/benchmark/CMakeLists.txt | 2 +- 
tdfec/td/fec/raptorq/Rfc.h | 2 +- tdnet/CMakeLists.txt | 2 +- tdtl/CMakeLists.txt | 2 +- tdutils/CMakeLists.txt | 4 +- tdutils/generate/CMakeLists.txt | 2 +- tdutils/td/utils/BigNum.cpp | 4 + tdutils/td/utils/BufferedUdp.h | 1 + tdutils/td/utils/crypto.cpp | 50 +- tdutils/td/utils/crypto.h | 2 +- tdutils/td/utils/port/stacktrace.cpp | 45 +- tdutils/test/MpmcWaiter.cpp | 6 +- terminal/CMakeLists.txt | 2 +- test/ed25519_crypto.cpp | 2053 ----------------- test/regression-tests.ans | 68 +- test/test-adnl.cpp | 2 +- test/test-catchain.cpp | 8 +- test/test-dht.cpp | 2 +- test/test-ext-client.cpp | 220 -- test/test-ext-server.cpp | 221 -- test/test-node.cpp | 376 --- test/test-rldp.cpp | 2 +- test/test-rldp2.cpp | 2 +- test/test-ton-collator.cpp | 4 +- test/test-validator-session-state.cpp | 1 + test/test-validator-session.cpp | 356 --- test/test-validator.cpp | 356 --- third-party/blst | 2 +- third-party/rocksdb | 2 +- tl-utils/CMakeLists.txt | 2 +- tl/CMakeLists.txt | 2 +- tl/generate/CMakeLists.txt | 2 +- tl/tl/tl_json.h | 3 +- tonlib/CMakeLists.txt | 2 +- tonlib/test/offline.cpp | 25 +- tonlib/tonlib/TonlibClient.cpp | 2 +- utils/CMakeLists.txt | 2 +- utils/opcode-timing.cpp | 3 +- validator-engine-console/CMakeLists.txt | 2 +- validator-engine/CMakeLists.txt | 2 +- validator-engine/validator-engine.cpp | 4 +- validator-session/CMakeLists.txt | 2 +- validator/CMakeLists.txt | 2 +- validator/impl/CMakeLists.txt | 2 +- validator/impl/collator-impl.h | 8 +- validator/impl/collator.cpp | 962 +++++++- validator/impl/external-message.cpp | 2 +- validator/impl/validate-query.cpp | 786 ++++++- validator/impl/validate-query.hpp | 3 +- 122 files changed, 2889 insertions(+), 4100 deletions(-) delete mode 100644 test/ed25519_crypto.cpp delete mode 100644 test/test-ext-client.cpp delete mode 100644 test/test-ext-server.cpp delete mode 100644 test/test-node.cpp delete mode 100644 test/test-validator-session.cpp delete mode 100644 test/test-validator.cpp diff --git a/.clang_complete b/.clang_complete index d7cda0ff..e88dbc7b 100644 --- a/.clang_complete +++ b/.clang_complete @@ -1,5 +1,5 @@ -xc++ --std=c++14 +-std=c++17 -iquote . 
-iquote tdtl/ -iquote tl/ diff --git a/.github/script/fift-func-wasm-build-ubuntu.sh b/.github/script/fift-func-wasm-build-ubuntu.sh index 259377cc..feac19e3 100755 --- a/.github/script/fift-func-wasm-build-ubuntu.sh +++ b/.github/script/fift-func-wasm-build-ubuntu.sh @@ -13,7 +13,7 @@ echo `pwd` git clone https://github.com/openssl/openssl.git cd openssl -git checkout OpenSSL_1_1_1j +git checkout checkout openssl-3.1.4 ./config make -j16 OPENSSL_DIR=`pwd` diff --git a/.github/workflows/macos-11.7-compile.yml b/.github/workflows/macos-11.7-compile.yml index 100d51c8..eb12db1b 100644 --- a/.github/workflows/macos-11.7-compile.yml +++ b/.github/workflows/macos-11.7-compile.yml @@ -12,24 +12,33 @@ jobs: uses: actions/checkout@v3 with: submodules: 'recursive' - - name: Compile OpenSSL + + - name: Compile Secp256k1 run: | export NONINTERACTIVE=1 brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./Configure darwin64-x86_64-cc -static -mmacosx-version-min=11.7 - make build_libs -j4 + git clone https://github.com/libbitcoin/secp256k1.git + cd secp256k1 + ./autogen.sh + ./configure --enable-module-recovery + make + make install - name: Build all run: | - brew unlink openssl@3 + brew unlink openssl@1.1 + brew install openssl@3 + brew unlink openssl@3 && brew link --overwrite openssl@3 rootPath=`pwd` mkdir build cd build - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. - ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + cmake -GNinja -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
+ + ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ + test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - name: Strip binaries run: | @@ -51,6 +60,11 @@ jobs: strip build/utils/json2tlo strip build/adnl/adnl-proxy + - name: Run tests + run: | + cd build + ctest --output-on-failure -E "test-catchain|test-actors" + - name: Find & copy binaries run: | mkdir artifacts diff --git a/.github/workflows/macos-12.6-compile.yml b/.github/workflows/macos-12.6-compile.yml index 8660a10b..f41efc66 100644 --- a/.github/workflows/macos-12.6-compile.yml +++ b/.github/workflows/macos-12.6-compile.yml @@ -12,18 +12,11 @@ jobs: uses: actions/checkout@v3 with: submodules: 'recursive' - - name: Compile OpenSSL - run: | - export NONINTERACTIVE=1 - brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./Configure darwin64-x86_64-cc -static -mmacosx-version-min=12.6 - make build_libs -j4 - name: Compile Secp256k1 run: | + export NONINTERACTIVE=1 + brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool git clone https://github.com/libbitcoin/secp256k1.git cd secp256k1 ./autogen.sh @@ -33,12 +26,19 @@ jobs: - name: Build all run: | - brew unlink openssl@3 + brew unlink openssl@1.1 + brew install openssl@3 + brew unlink openssl@3 && brew link --overwrite openssl@3 rootPath=`pwd` mkdir build cd build - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. - ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + cmake -GNinja -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
+ + ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ + test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - name: Strip binaries run: | @@ -60,6 +60,11 @@ jobs: strip build/utils/json2tlo strip build/adnl/adnl-proxy + - name: Run tests + run: | + cd build + ctest --output-on-failure -E "test-catchain|test-actors" + - name: Find & copy binaries run: | mkdir artifacts diff --git a/.github/workflows/ton-x86-64-linux.yml b/.github/workflows/ton-x86-64-linux.yml index e939f22f..a4760dc4 100644 --- a/.github/workflows/ton-x86-64-linux.yml +++ b/.github/workflows/ton-x86-64-linux.yml @@ -15,7 +15,7 @@ jobs: with: submodules: 'recursive' - - uses: cachix/install-nix-action@v18 + - uses: cachix/install-nix-action@v23 with: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ton-x86-64-macos.yml b/.github/workflows/ton-x86-64-macos.yml index b9e9cf9b..cea2937a 100644 --- a/.github/workflows/ton-x86-64-macos.yml +++ b/.github/workflows/ton-x86-64-macos.yml @@ -11,7 +11,7 @@ jobs: with: submodules: 'recursive' - - uses: cachix/install-nix-action@v22 + - uses: cachix/install-nix-action@v23 with: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/ubuntu-22.04-compile.yml b/.github/workflows/ubuntu-22.04-compile.yml index b5d4d571..af8943a1 100644 --- a/.github/workflows/ubuntu-22.04-compile.yml +++ b/.github/workflows/ubuntu-22.04-compile.yml @@ -28,9 +28,9 @@ jobs: export CXX=$(which clang++) export CCACHE_DISABLE=1 - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + git checkout openssl-3.1.4 ./config make build_libs -j4 @@ -39,13 +39,23 @@ jobs: mkdir build cd build - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. - ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state emulator + cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_3/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_3/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. 
+ + ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client \ + pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ + test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - name: Strip binaries run: | strip -g build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* + - name: Run tests + run: | + cd build + ctest --output-on-failure -E "test-catchain|test-actors" + - name: Find & copy binaries run: | mkdir artifacts diff --git a/.github/workflows/ubuntu-compile.yml b/.github/workflows/ubuntu-compile.yml index be3878f5..3c1e7bad 100644 --- a/.github/workflows/ubuntu-compile.yml +++ b/.github/workflows/ubuntu-compile.yml @@ -34,22 +34,31 @@ jobs: mkdir build-${{ matrix.os }} cd build-${{ matrix.os }} - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + git checkout openssl-3.1.4 ./config make build_libs -j4 cd .. - buildPath=`pwd` + rootPath=`pwd` - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$buildPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$buildPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. - ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator + cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_3/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_3/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. 
+ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client \ + pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy \ + create-state create-hardfork emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ + test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - name: Strip binaries run: | strip -g build-${{ matrix.os }}/storage/storage-daemon/storage-daemon build-${{ matrix.os }}/storage/storage-daemon/storage-daemon-cli build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy build-${{ matrix.os }}/emulator/libemulator.* + - name: Run tests + run: | + cd build-${{ matrix.os }} + ctest --output-on-failure -E "test-catchain|test-actors" + - name: Find & copy binaries run: | mkdir artifacts-${{ matrix.os }} diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml index 9b4d3bef..e94655f7 100644 --- a/.github/workflows/win-2019-compile.yml +++ b/.github/workflows/win-2019-compile.yml @@ -49,10 +49,10 @@ jobs: curl -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip unzip libsodium-1.0.18-stable-msvc.zip - - name: Install pre-compiled OpenSSL Win64 + - name: Install pre-compiled OpenSSL 3 Win64 run: | - curl -Lo openssl-1.1.1j.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1j.zip - jar xf openssl-1.1.1j.zip + curl -Lo openssl-3.1.4.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-3.1.4.zip + unzip openssl-3.1.4.zip - name: Install pre-compiled libmicrohttpd Win64 run: | @@ -72,8 +72,13 @@ jobs: echo %SODIUM_DIR% mkdir build cd build - cmake -DCMAKE_BUILD_TYPE=Release -DSODIUM_USE_STATIC_LIBS=1 -DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include -DSECP256K1_LIBRARY=%root%\secp256k1\bin\x64\Release\v142\static\secp256k1.lib -DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include\readline -DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1.1j/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1.1j/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. - cmake --build . 
--target storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator --config Release + cmake -DSODIUM_USE_STATIC_LIBS=1 -DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include -DSECP256K1_LIBRARY=%root%\secp256k1\bin\x64\Release\v142\static\secp256k1.lib -DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include\readline -DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-3.1.4/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-3.1.4/x64/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. + cmake --build . --config Release --target storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + + - name: Run tests + run: | + cd build + ctest -C Release --output-on-failure -E "test-catchain|test-actors|test-validator-session-state" - name: Show executables run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index 4b6f0dc9..c6d7ed87 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.1 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) project(TON VERSION 0.5 LANGUAGES C CXX) set(CMAKE_POSITION_INDEPENDENT_CODE ON) @@ -79,7 +79,7 @@ else() set(HAVE_SSE42 FALSE) endif() -set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) set(CMAKE_CXX_EXTENSIONS FALSE) @@ -255,6 +255,9 @@ if (MSVC) add_definitions(-D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP /W4 /wd4100 /wd4127 /wd4324 /wd4456 /wd4457 /wd4458 /wd4505 /wd4702") elseif (CLANG OR GCC) + if (GCC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrong-eval-order=some") + endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") if (APPLE) #use "-Wl,-exported_symbols_list,${CMAKE_CURRENT_SOURCE_DIR}/export_list" for exported symbols @@ -447,6 +450,10 @@ target_link_libraries(test-smartcont PRIVATE smc-envelope fift-lib ton_db) add_executable(test-bigint ${BIGINT_TEST_SOURCE}) target_link_libraries(test-bigint PRIVATE ton_crypto) +if (WINGETOPT_FOUND) + target_link_libraries_system(test-bigint wingetopt) +endif() + add_executable(test-cells test/test-td-main.cpp ${CELLS_TEST_SOURCE}) target_link_libraries(test-cells PRIVATE ton_crypto) @@ -513,26 +520,12 @@ target_link_libraries(test-rldp2 adnl adnltest dht rldp2 tl_api) add_executable(test-validator-session-state test/test-validator-session-state.cpp) target_link_libraries(test-validator-session-state adnl dht rldp validatorsession tl_api) 
-#add_executable(test-node test/test-node.cpp) -#target_link_libraries(test-node overlay tdutils tdactor adnl tl_api dht -# catchain validatorsession) - add_executable(test-catchain test/test-catchain.cpp) target_link_libraries(test-catchain overlay tdutils tdactor adnl adnltest rldp tl_api dht catchain ) -#add_executable(test-validator-session test/test-validator-session.cpp) -#target_link_libraries(test-validator-session overlay tdutils tdactor adnl tl_api dht -# catchain validatorsession) add_executable(test-ton-collator test/test-ton-collator.cpp) target_link_libraries(test-ton-collator overlay tdutils tdactor adnl tl_api dht catchain validatorsession validator-disk ton_validator validator-disk ) -#add_executable(test-validator test/test-validator.cpp) -#target_link_libraries(test-validator overlay tdutils tdactor adnl tl_api dht -# rldp catchain validatorsession ton-node validator ton_validator validator memprof ${JEMALLOC_LIBRARIES}) -#add_executable(test-ext-server test/test-ext-server.cpp) -#target_link_libraries(test-ext-server tdutils tdactor adnl tl_api dht ) -#add_executable(test-ext-client test/test-ext-client.cpp) -#target_link_libraries(test-ext-client tdutils tdactor adnl tl_api tl-lite-utils) add_executable(test-http test/test-http.cpp) target_link_libraries(test-http PRIVATE tonhttp) @@ -574,13 +567,53 @@ add_test(test-tdutils test-tdutils) add_test(test-tonlib-offline test-tonlib-offline) #END tonlib +# FunC tests +if (NOT NIX) + if (MSVC) + set(PYTHON_VER "python") + else() + set(PYTHON_VER "python3") + endif() + add_test( + NAME test-func + COMMAND ${PYTHON_VER} run_tests.py tests/ + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/crypto/func/auto-tests) + if (WIN32) + set_property(TEST test-func PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/func.exe" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/fift.exe" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + else() + set_property(TEST test-func PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + endif() + + add_test( + NAME test-func-legacy + COMMAND ${PYTHON_VER} legacy_tester.py + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/crypto/func/auto-tests) + if (WIN32) + set_property(TEST test-func-legacy PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/func.exe" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/fift.exe" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + else() + set_property(TEST test-func-legacy PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + endif() +endif() + #BEGIN internal if (NOT TON_ONLY_TONLIB) add_test(test-adnl test-adnl) add_test(test-dht test-dht) add_test(test-rldp test-rldp) add_test(test-rldp2 test-rldp2) -#add_test(test-validator-session-state test-validator-session-state) +add_test(test-validator-session-state test-validator-session-state) add_test(test-catchain test-catchain) add_test(test-fec test-fec) diff --git a/README.md b/README.md index 6bc3b1f5..12d05190 100644 --- a/README.md +++ b/README.md @@ -66,3 +66,8 @@ If a CI workflow fails not because of your changes but workflow issues, try to f * **C/C++ CI (ccpp-linux.yml)**: TBD * **C/C++ CI Win64 Compile 
(ccpp-win64.yml)**: TBD + + +## Running tests + +Tests are executed by running `ctest` in the build directory. \ No newline at end of file diff --git a/adnl/CMakeLists.txt b/adnl/CMakeLists.txt index b287cba0..217a9624 100644 --- a/adnl/CMakeLists.txt +++ b/adnl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #BEGIN internal if (NOT TON_ONLY_TONLIB) diff --git a/blockchain-explorer/CMakeLists.txt b/blockchain-explorer/CMakeLists.txt index 11328a7a..fc94e709 100644 --- a/blockchain-explorer/CMakeLists.txt +++ b/blockchain-explorer/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(NIX "Use \"ON\" for a static build." OFF) diff --git a/blockchain-explorer/blockchain-explorer.cpp b/blockchain-explorer/blockchain-explorer.cpp index 035b84ea..bf41b31f 100644 --- a/blockchain-explorer/blockchain-explorer.cpp +++ b/blockchain-explorer/blockchain-explorer.cpp @@ -52,7 +52,7 @@ #include "vm/boc.h" #include "vm/cellops.h" #include "vm/cells/MerkleProof.h" -#include "vm/cp0.h" +#include "vm/vm.h" #include "auto/tl/lite_api.h" #include "ton/lite-tl.hpp" @@ -654,7 +654,7 @@ int main(int argc, char* argv[]) { }); #endif - vm::init_op_cp0(); + vm::init_vm().ensure(); td::actor::Scheduler scheduler({2}); scheduler_ptr = &scheduler; diff --git a/catchain/CMakeLists.txt b/catchain/CMakeLists.txt index a57d3788..8ab9525d 100644 --- a/catchain/CMakeLists.txt +++ b/catchain/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 96d92371..88a3671b 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(COMMON_SOURCE checksum.h diff --git a/create-hardfork/CMakeLists.txt b/create-hardfork/CMakeLists.txt index 3d78c118..41b94b52 100644 --- a/create-hardfork/CMakeLists.txt +++ b/create-hardfork/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/create-hardfork/create-hardfork.cpp b/create-hardfork/create-hardfork.cpp index 15533768..5f0b93be 100644 --- a/create-hardfork/create-hardfork.cpp +++ b/create-hardfork/create-hardfork.cpp @@ -49,7 +49,7 @@ #include "validator/fabric.h" #include "validator/impl/collator.h" -#include "crypto/vm/cp0.h" +#include "crypto/vm/vm.h" #include "crypto/block/block-db.h" #include "common/errorlog.h" @@ -307,7 +307,7 @@ int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_INFO); td::set_default_failure_signal_handler().ensure(); - CHECK(vm::init_op_cp0()); + vm::init_vm().ensure(); td::actor::ActorOwn x; diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index 9f7a0a5a..62b0d216 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 0c77317f..9df5be13 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -28,9 +28,20 @@ #include "td/utils/Timer.h" namespace { 
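The documentation comments added to transaction.cpp below start with StringLoggerTail, a small td::LogInterface that keeps only the last max_size bytes written through it, so that the tail of a long TVM debug trace can be reported after a compute phase. The following sketch shows how such a tail logger can be plugged into a VM log configuration; it is illustrative only, and the vm::VmLog field names and the td::LogOptions constructor used here are assumptions based on how the logger is typically wired up, not a copy of the code in this patch:

    // Sketch: keep only the last 256 bytes of a verbose TVM run.
    // Assumes the StringLoggerTail class documented below is in scope,
    // and that vm::VmLog (crypto/vm/log.h) exposes log_interface / log_options.
    #include "vm/log.h"
    #include "td/utils/logging.h"
    #include <memory>
    #include <string>

    std::string run_with_log_tail() {
      auto logger = std::make_unique<StringLoggerTail>(256);   // ring buffer of at most 256 bytes
      vm::VmLog vm_log;
      vm_log.log_interface = logger.get();                     // route VM log lines into the buffer
      vm_log.log_options = td::LogOptions(VERBOSITY_NAME(DEBUG), true, false);
      // ... construct a vm::VmState with vm_log and run the contract ...
      return logger->get_log();                                // only the most recent bytes survive
    }

Once the buffer wraps, get_log() returns the most recent bytes rather than the first ones written, which is the useful part when only the end of a long trace matters.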
+/** + * Logger that stores the tail of log messages. + * + * @param max_size The size of the buffer. Default is 256. + */ class StringLoggerTail : public td::LogInterface { public: explicit StringLoggerTail(size_t max_size = 256) : buf(max_size, '\0') {} + + /** + * Appends a slice of data to the buffer. + * + * @param slice The slice of data to be appended. + */ void append(td::CSlice slice) override { if (slice.size() > buf.size()) { slice.remove_prefix(slice.size() - buf.size()); @@ -46,6 +57,12 @@ class StringLoggerTail : public td::LogInterface { slice.remove_prefix(s); } } + + /** + * Retrieves the tail of the log. + * + * @returns The log as std::string. + */ std::string get_log() const { if (truncated) { std::string res = buf; @@ -55,6 +72,7 @@ class StringLoggerTail : public td::LogInterface { return buf.substr(0, pos); } } + private: std::string buf; size_t pos = 0; @@ -65,6 +83,13 @@ class StringLoggerTail : public td::LogInterface { namespace block { using td::Ref; +/** + * Looks up a library among public libraries. + * + * @param key A constant bit pointer representing the key of the library to lookup. + * + * @returns A reference to the library cell if found, null otherwise. + */ Ref ComputePhaseConfig::lookup_library(td::ConstBitPtr key) const { return libraries ? vm::lookup_library_in(key, libraries->get_root_cell()) : Ref{}; } @@ -75,12 +100,27 @@ Ref ComputePhaseConfig::lookup_library(td::ConstBitPtr key) const { * */ +/** + * Sets the address of the account. + * + * @param wc The workchain ID of the account. + * @param new_addr The new address of the account. + * + * @returns True if the address was successfully set, false otherwise. + */ bool Account::set_address(ton::WorkchainId wc, td::ConstBitPtr new_addr) { workchain = wc; addr = new_addr; return true; } +/** + * Sets the split depth of the account. + * + * @param new_split_depth The new split depth value to be set. + * + * @returns True if the split depth was successfully set, False otherwise. + */ bool Account::set_split_depth(int new_split_depth) { if (new_split_depth < 0 || new_split_depth > 30) { return false; // invalid value for split_depth @@ -94,11 +134,26 @@ bool Account::set_split_depth(int new_split_depth) { } } +/** + * Checks if the given split depth is valid for the Account. + * + * @param split_depth The split depth to be checked. + * + * @returns True if the split depth is valid, False otherwise. + */ bool Account::check_split_depth(int split_depth) const { return split_depth_set_ ? (split_depth == split_depth_) : (split_depth >= 0 && split_depth <= 30); } -// initializes split_depth and addr_rewrite +/** + * Parses anycast data of the account address. + * + * Initializes split_depth and addr_rewrite. + * + * @param cs The cell slice containing partially-parsed account addressa. + * + * @returns True if parsing was successful, false otherwise. + */ bool Account::parse_maybe_anycast(vm::CellSlice& cs) { int t = (int)cs.fetch_ulong(1); if (t < 0) { @@ -113,6 +168,13 @@ bool Account::parse_maybe_anycast(vm::CellSlice& cs) { && set_split_depth(depth); } +/** + * Stores the anycast information to a serialized account address. + * + * @param cb The vm::CellBuilder object to store the information in. + * + * @returns True if the anycast information was successfully stored, false otherwise. 
+ */ bool Account::store_maybe_anycast(vm::CellBuilder& cb) const { if (!split_depth_set_ || !split_depth_) { return cb.store_bool_bool(false); @@ -122,6 +184,13 @@ bool Account::store_maybe_anycast(vm::CellBuilder& cb) const { && cb.store_bits_bool(addr_rewrite.cbits(), split_depth_); // rewrite_pfx:(bits depth) } +/** + * Unpacks the address from a given CellSlice. + * + * @param addr_cs The CellSlice containing the address. + * + * @returns True if the address was successfully unpacked, False otherwise. + */ bool Account::unpack_address(vm::CellSlice& addr_cs) { int addr_tag = block::gen::t_MsgAddressInt.get_tag(addr_cs); int new_wc = ton::workchainInvalid; @@ -172,6 +241,15 @@ bool Account::unpack_address(vm::CellSlice& addr_cs) { return true; } +/** + * Unpacks storage information from a CellSlice. + * + * Storage information is serialized using StorageInfo TLB-scheme. + * + * @param cs The CellSlice containing the storage information. + * + * @returns True if the unpacking is successful, false otherwise. + */ bool Account::unpack_storage_info(vm::CellSlice& cs) { block::gen::StorageInfo::Record info; block::gen::StorageUsed::Record used; @@ -198,7 +276,16 @@ bool Account::unpack_storage_info(vm::CellSlice& cs) { return (u != std::numeric_limits::max()); } -// initializes split_depth (from account state - StateInit) +/** + * Unpacks the state of an Account from a CellSlice. + * + * State is serialized using StateInit TLB-scheme. + * Initializes split_depth (from account state - StateInit) + * + * @param cs The CellSlice containing the serialized state. + * + * @returns True if the state was successfully unpacked, False otherwise. + */ bool Account::unpack_state(vm::CellSlice& cs) { block::gen::StateInit::Record state; if (!tlb::unpack_exact(cs, state)) { @@ -226,6 +313,13 @@ bool Account::unpack_state(vm::CellSlice& cs) { return true; } +/** + * Computes the address of the account. + * + * @param force If set to true, the address will be recomputed even if it already exists. + * + * @returns True if the address was successfully computed, false otherwise. + */ bool Account::compute_my_addr(bool force) { if (!force && my_addr.not_null() && my_addr_exact.not_null()) { return true; @@ -266,6 +360,15 @@ bool Account::compute_my_addr(bool force) { return true; } +/** + * Computes the address of the Account. + * + * @param tmp_addr A reference to the CellSlice for the result. + * @param split_depth The split depth for the address. + * @param orig_addr_rewrite Address prefox of length split_depth. + * + * @returns True if the address was successfully computed, false otherwise. + */ bool Account::recompute_tmp_addr(Ref& tmp_addr, int split_depth, td::ConstBitPtr orig_addr_rewrite) const { if (!split_depth && my_addr_exact.not_null()) { @@ -307,6 +410,14 @@ bool Account::recompute_tmp_addr(Ref& tmp_addr, int split_depth, (tmp_addr = vm::load_cell_slice_ref(std::move(cell))).not_null(); } +/** + * Sets address rewriting info for a newly-activated account. + * + * @param split_depth The split depth for the account address. + * @param orig_addr_rewrite Address frepix of length split_depth. + * + * @returns True if the rewriting info was successfully set, false otherwise. 
+ */ bool Account::init_rewrite_addr(int split_depth, td::ConstBitPtr orig_addr_rewrite) { if (split_depth_set_ || !set_split_depth(split_depth)) { return false; @@ -317,8 +428,18 @@ bool Account::init_rewrite_addr(int split_depth, td::ConstBitPtr orig_addr_rewri return compute_my_addr(true); } -// used to unpack previously existing accounts -bool Account::unpack(Ref shard_account, Ref extra, ton::UnixTime now, bool special) { +/** + * Unpacks the account information from the provided CellSlice. + * + * Used to unpack previously existing accounts. + * + * @param shard_account The ShardAccount to unpack. + * @param now The current Unix time. + * @param special Flag indicating if the account is special. + * + * @returns True if the unpacking is successful, false otherwise. + */ +bool Account::unpack(Ref shard_account, ton::UnixTime now, bool special) { LOG(DEBUG) << "unpacking " << (special ? "special " : "") << "account " << addr.to_hex(); if (shard_account.is_null()) { LOG(ERROR) << "account " << addr.to_hex() << " does not have a valid ShardAccount to unpack"; @@ -386,7 +507,13 @@ bool Account::unpack(Ref shard_account, Ref extra, return true; } -// used to initialize new accounts +/** + * Initializes a new Account object. + * + * @param now The current Unix time. + * + * @returns True if the initialization is successful, false otherwise. + */ bool Account::init_new(ton::UnixTime now) { // only workchain and addr are initialized at this point if (workchain == ton::workchainInvalid) { @@ -429,6 +556,11 @@ bool Account::init_new(ton::UnixTime now) { return true; } +/** + * Resets the split depth of the account. + * + * @returns True if the split depth was successfully reset, false otherwise. + */ bool Account::forget_split_depth() { split_depth_set_ = false; split_depth_ = 0; @@ -438,6 +570,11 @@ bool Account::forget_split_depth() { return true; } +/** + * Deactivates the account. + * + * @returns True if the account was successfully deactivated, false otherwise. + */ bool Account::deactivate() { if (status == acc_active) { return false; @@ -461,10 +598,26 @@ bool Account::deactivate() { return true; } +/** + * Checks if the account belongs to a specific shard. + * + * @param shard The shard to check against. + * + * @returns True if the account belongs to the shard, False otherwise. + */ bool Account::belongs_to_shard(ton::ShardIdFull shard) const { return workchain == shard.workchain && ton::shard_is_ancestor(shard.shard, addr); } +/** + * Adds the partial storage payment to the total sum. + * + * @param payment The total sum to be updated. + * @param delta The time delta for which the payment is calculated. + * @param prices The storage prices. + * @param storage Account storage statistics. + * @param is_mc A flag indicating whether the account is in the masterchain. + */ void add_partial_storage_payment(td::BigInt256& payment, ton::UnixTime delta, const block::StoragePrices& prices, const vm::CellStorageStat& storage, bool is_mc) { td::BigInt256 c{(long long)storage.cells}, b{(long long)storage.bits}; @@ -483,6 +636,18 @@ void add_partial_storage_payment(td::BigInt256& payment, ton::UnixTime delta, co payment += b; } +/** + * Computes the storage fees based on the given parameters. + * + * @param now The current Unix time. + * @param pricing The vector of storage prices. + * @param storage_stat Account storage statistics. + * @param last_paid The Unix time when the last payment was made. + * @param is_special A flag indicating if the account is special. 
+ * @param is_masterchain A flag indicating if the account is in the masterchain. + * + * @returns The computed storage fees as RefInt256. + */ td::RefInt256 StoragePrices::compute_storage_fees(ton::UnixTime now, const std::vector& pricing, const vm::CellStorageStat& storage_stat, ton::UnixTime last_paid, bool is_special, bool is_masterchain) { @@ -509,11 +674,30 @@ td::RefInt256 StoragePrices::compute_storage_fees(ton::UnixTime now, const std:: return td::rshift(total, 16, 1); // divide by 2^16 with ceil rounding to obtain nanograms } +/** + * Computes the storage fees for the account. + * + * @param now The current Unix time. + * @param pricing The vector of storage prices. + * + * @returns The computed storage fees as RefInt256. + */ td::RefInt256 Account::compute_storage_fees(ton::UnixTime now, const std::vector& pricing) const { return StoragePrices::compute_storage_fees(now, pricing, storage_stat, last_paid, is_special, is_masterchain()); } namespace transaction { +/** + * Constructs a new Transaction object. + * + * @param _account The Account object. + * @param ttype The type of the transaction (see transaction.cpp#309). + * @param req_start_lt The minimal logical time of the transaction. + * @param _now The current Unix time. + * @param _inmsg The input message that caused the transaction. + * + * @returns None + */ Transaction::Transaction(const Account& _account, int ttype, ton::LogicalTime req_start_lt, ton::UnixTime _now, Ref _inmsg) : trans_type(ttype) @@ -540,6 +724,14 @@ Transaction::Transaction(const Account& _account, int ttype, ton::LogicalTime re } } +/** + * Unpacks the input message of a transaction. + * + * @param ihr_delivered A boolean indicating whether the message was delivered using IHR (Instant Hypercube Routing). + * @param cfg Action phase configuration. + * + * @returns A boolean indicating whether the unpacking was successful. + */ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* cfg) { if (in_msg.is_null() || in_msg_type) { return false; @@ -680,6 +872,15 @@ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* return true; } +/** + * Prepares the storage phase of a transaction. + * + * @param cfg The configuration for the storage phase. + * @param force_collect Flag indicating whether to collect fees for frozen accounts. + * @param adjust_msg_value Flag indicating whether to adjust the message value if the account balance becomes less than the message balance. + * + * @returns True if the storage phase was successfully prepared, false otherwise. + */ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool force_collect, bool adjust_msg_value) { if (now < account.last_paid) { return false; @@ -743,12 +944,21 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc return true; } +/** + * Prepares the credit phase of a transaction. + * + * This function creates a CreditPhase object and performs the necessary calculations + * to determine the amount to be credited in the credit phase. It updates the due payment, + * credit, balance, and total fees accordingly. + * + * @returns True if the credit phase is prepared successfully, false otherwise. 
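The storage fee computation documented above (add_partial_storage_payment and StoragePrices::compute_storage_fees) charges cells*cell_price + bits*bit_price per second within each pricing period, with separate masterchain prices, and finally divides the accumulated total by 2^16 with ceiling rounding. A standalone sketch with plain integers; the struct fields and the way periods are split are assumptions made for illustration, and the real code uses td::BigInt256 rather than 128-bit integers:

#include <cstddef>
#include <cstdint>
#include <vector>

// One pricing entry, roughly mirroring StoragePrices (illustrative field set).
struct StoragePrice {
  std::uint32_t valid_since;   // unixtime from which this entry applies
  std::uint64_t bit_price;     // price per bit-second, scaled by 2^16
  std::uint64_t cell_price;    // price per cell-second, scaled by 2^16
  std::uint64_t mc_bit_price;  // masterchain prices
  std::uint64_t mc_cell_price;
};

// (cells * cell_price + bits * bit_price) * delta for one pricing period.
static unsigned __int128 partial_payment(std::uint64_t cells, std::uint64_t bits, std::uint64_t delta,
                                         const StoragePrice& p, bool is_masterchain) {
  unsigned __int128 per_second =
      is_masterchain
          ? (unsigned __int128)cells * p.mc_cell_price + (unsigned __int128)bits * p.mc_bit_price
          : (unsigned __int128)cells * p.cell_price + (unsigned __int128)bits * p.bit_price;
  return per_second * delta;
}

// Sum the per-period payments between last_paid and now, then divide by 2^16 rounding up.
std::uint64_t storage_fee_nanograms(std::uint64_t cells, std::uint64_t bits, std::uint32_t last_paid,
                                    std::uint32_t now, const std::vector<StoragePrice>& pricing,
                                    bool is_masterchain) {
  unsigned __int128 total = 0;
  for (std::size_t i = 0; i < pricing.size() && pricing[i].valid_since < now; ++i) {
    std::uint32_t from = pricing[i].valid_since > last_paid ? pricing[i].valid_since : last_paid;
    std::uint32_t upto = (i + 1 < pricing.size() && pricing[i + 1].valid_since < now)
                             ? pricing[i + 1].valid_since
                             : now;
    if (upto > from) {
      total += partial_payment(cells, bits, upto - from, pricing[i], is_masterchain);
    }
  }
  return (std::uint64_t)((total + 0xffff) >> 16);  // divide by 2^16 with ceiling rounding
}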
+ */ bool Transaction::prepare_credit_phase() { credit_phase = std::make_unique(); - auto collected = std::min(msg_balance_remaining.grams, due_payment); - credit_phase->due_fees_collected = collected; - due_payment -= collected; - credit_phase->credit = msg_balance_remaining -= collected; + // auto collected = std::min(msg_balance_remaining.grams, due_payment); + // credit_phase->due_fees_collected = collected; + // due_payment -= collected; + // credit_phase->credit = msg_balance_remaining -= collected; if (!msg_balance_remaining.is_valid()) { LOG(ERROR) << "cannot compute the amount to be credited in the credit phase of transaction"; return false; @@ -759,17 +969,35 @@ bool Transaction::prepare_credit_phase() { LOG(ERROR) << "cannot credit currency collection to account"; return false; } - total_fees += std::move(collected); + // total_fees += std::move(collected); return true; } } // namespace transaction +/** + * Parses the gas limits and prices from a given cell. + * + * @param cell The cell containing the gas limits and prices serialized using GasLimitsPricing TLB-scheme. + * @param freeze_due_limit Reference to store the freeze due limit. + * @param delete_due_limit Reference to store the delete due limit. + * + * @returns True if the parsing is successful, false otherwise. + */ bool ComputePhaseConfig::parse_GasLimitsPrices(Ref cell, td::RefInt256& freeze_due_limit, td::RefInt256& delete_due_limit) { return cell.not_null() && parse_GasLimitsPrices(vm::load_cell_slice_ref(std::move(cell)), freeze_due_limit, delete_due_limit); } +/** + * Parses the gas limits and prices from a given cell slice. + * + * @param cs The cell slice containing the gas limits and prices serialized using GasLimitsPricing TLB-scheme. + * @param freeze_due_limit Reference to store the freeze due limit. + * @param delete_due_limit Reference to store the delete due limit. + * + * @returns True if the parsing is successful, false otherwise. + */ bool ComputePhaseConfig::parse_GasLimitsPrices(Ref cs, td::RefInt256& freeze_due_limit, td::RefInt256& delete_due_limit) { if (cs.is_null()) { @@ -784,6 +1012,17 @@ bool ComputePhaseConfig::parse_GasLimitsPrices(Ref cs, td::RefInt } } +/** + * Parses the gas limits and prices from a gas limits and prices record. + * + * @param cs The cell slice containing the gas limits and prices serialized using GasLimitsPricing TLB-scheme. + * @param freeze_due_limit A reference to store the freeze due limit. + * @param delete_due_limit A reference to store the delete due limit. + * @param _flat_gas_limit The flat gas limit. + * @param _flat_gas_price The flat gas price. + * + * @returns True if the parsing is successful, false otherwise. + */ bool ComputePhaseConfig::parse_GasLimitsPrices_internal(Ref cs, td::RefInt256& freeze_due_limit, td::RefInt256& delete_due_limit, td::uint64 _flat_gas_limit, td::uint64 _flat_gas_price) { @@ -812,6 +1051,14 @@ bool ComputePhaseConfig::parse_GasLimitsPrices_internal(Ref cs, t return true; } +/** + * Checks if an address is suspended according to the ConfigParam(44). + * + * @param wc The workchain ID. + * @param addr The account address address. + * + * @returns True if the address is suspended, False otherwise. + */ bool ComputePhaseConfig::is_address_suspended(ton::WorkchainId wc, td::Bits256 addr) const { if (!suspended_addresses) { return false; @@ -826,6 +1073,11 @@ bool ComputePhaseConfig::is_address_suspended(ton::WorkchainId wc, td::Bits256 a } } +/** + * Computes the maximum for gas fee based on the gas prices and limits. 
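compute_threshold, gas_bought_for and compute_gas_price below all apply the same pricing rule: the first flat_gas_limit units of gas cost a flat flat_gas_price nanograms, and every further unit costs gas_price/2^16 nanograms, with the total rounded up. A standalone sketch of the fee and its inverse with plain integers; parameter names follow GasLimitsPrices, but this is an illustration rather than the project code, and it assumes gas_price > 0:

#include <cstdint>

// Nanograms charged for `gas_used` units of gas; gas_price is scaled by 2^16.
std::uint64_t gas_fee(std::uint64_t gas_used, std::uint64_t gas_price,
                      std::uint64_t flat_gas_limit, std::uint64_t flat_gas_price) {
  if (gas_used <= flat_gas_limit) {
    return flat_gas_price;
  }
  unsigned __int128 extra = (unsigned __int128)gas_price * (gas_used - flat_gas_limit);
  return flat_gas_price + (std::uint64_t)((extra + 0xffff) >> 16);  // ceiling division by 2^16
}

// How much gas a given amount of nanograms buys: the inverse of gas_fee, rounded down.
std::uint64_t gas_bought_for(std::uint64_t nanograms, std::uint64_t gas_price,
                             std::uint64_t flat_gas_limit, std::uint64_t flat_gas_price) {
  if (nanograms < flat_gas_price) {
    return 0;  // not even the flat part is covered
  }
  unsigned __int128 rest = (unsigned __int128)(nanograms - flat_gas_price) << 16;
  return flat_gas_limit + (std::uint64_t)(rest / gas_price);
}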
+ * + * Updates max_gas_threshold. + */ void ComputePhaseConfig::compute_threshold() { gas_price256 = td::make_refint(gas_price); if (gas_limit > flat_gas_limit) { @@ -836,6 +1088,13 @@ void ComputePhaseConfig::compute_threshold() { } } +/** + * Computes the amount of gas that can be bought for a given amount of nanograms. + * + * @param nanograms The amount of nanograms to compute gas for. + * + * @returns The amount of gas. + */ td::uint64 ComputePhaseConfig::gas_bought_for(td::RefInt256 nanograms) const { if (nanograms.is_null() || sgn(nanograms) < 0) { return 0; @@ -850,12 +1109,27 @@ td::uint64 ComputePhaseConfig::gas_bought_for(td::RefInt256 nanograms) const { return res->to_long() + flat_gas_limit; } +/** + * Computes the gas price. + * + * @param gas_used The amount of gas used. + * + * @returns The computed gas price. + */ td::RefInt256 ComputePhaseConfig::compute_gas_price(td::uint64 gas_used) const { return gas_used <= flat_gas_limit ? td::make_refint(flat_gas_price) : td::rshift(gas_price256 * (gas_used - flat_gas_limit), 16, 1) + flat_gas_price; } namespace transaction { +/** + * Computes the gas limits for a transaction. + * + * @param cp The ComputePhase object to store the computed gas limits. + * @param cfg The compute phase configuration. + * + * @returns True if the gas limits were successfully computed, false otherwise. + */ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& cfg) { // Compute gas limits if (account.is_special) { @@ -880,6 +1154,14 @@ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& return true; } +/** + * Prepares a TVM stack for a transaction. + * + * @param cp The compute phase object. + * + * @returns A reference to the prepared virtual machine stack. + * Returns an empty reference if the transaction type is invalid. + */ Ref Transaction::prepare_vm_stack(ComputePhase& cp) { Ref stack_ref{true}; td::RefInt256 acc_addr{true}; @@ -906,6 +1188,14 @@ Ref Transaction::prepare_vm_stack(ComputePhase& cp) { } } +/** + * Prepares a random seed for a transaction. + * + * @param rand_seed The output random seed. + * @param cfg The configuration for the compute phase. + * + * @returns True if the random seed was successfully prepared, false otherwise. + */ bool Transaction::prepare_rand_seed(td::BitArray<256>& rand_seed, const ComputePhaseConfig& cfg) const { // we might use SHA256(block_rand_seed . addr . trans_lt) // instead, we use SHA256(block_rand_seed . addr) @@ -918,6 +1208,15 @@ bool Transaction::prepare_rand_seed(td::BitArray<256>& rand_seed, const ComputeP return true; } +/** + * Prepares the c7 tuple (virtual machine context) for a compute phase of a transaction. + * + * @param cfg The configuration for the compute phase. + * + * @returns A reference to a Tuple object. + * + * @throws CollatorError if the rand_seed cannot be computed for the transaction. + */ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { td::BitArray<256> rand_seed; td::RefInt256 rand_seed_int{true}; @@ -947,10 +1246,15 @@ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { } tuple.push_back(storage_phase->fees_collected); // storage_fees:Integer - // See crypto/block/mc-config.cpp#2115 (get_prev_blocks_info) + // See crypto/block/mc-config.cpp#2223 (get_prev_blocks_info) // [ wc:Integer shard:Integer seqno:Integer root_hash:Integer file_hash:Integer] = BlockId; // [ last_mc_blocks:[BlockId...] 
// prev_key_block:BlockId ] : PrevBlocksInfo + // The only context where PrevBlocksInfo (13 parameter of c7) is null is inside emulator + // where it need to be set via transaction_emulator_set_prev_blocks_info (see emulator/emulator-extern.cpp) + // Inside validator, collator and liteserver checking external message contexts + // prev_blocks_info is always not null, since get_prev_blocks_info() + // may only return tuple or raise Error (See crypto/block/mc-config.cpp#2223) tuple.push_back(cfg.prev_blocks_info.not_null() ? vm::StackEntry(cfg.prev_blocks_info) : vm::StackEntry()); } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); @@ -958,6 +1262,13 @@ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { return vm::make_tuple_ref(std::move(tuple_ref)); } +/** + * Computes the number of output actions in a list. + * + * @param list c5 cell. + * + * @returns The number of output actions. + */ int output_actions_count(Ref list) { int i = -1; do { @@ -972,6 +1283,13 @@ int output_actions_count(Ref list) { return i; } +/** + * Unpacks the message StateInit. + * + * @param lib_only If true, only unpack libraries from the state. + * + * @returns True if the unpacking is successful, false otherwise. + */ bool Transaction::unpack_msg_state(bool lib_only) { block::gen::StateInit::Record state; if (in_msg_state.is_null() || !tlb::unpack_cell(in_msg_state, state)) { @@ -1002,6 +1320,13 @@ bool Transaction::unpack_msg_state(bool lib_only) { return true; } +/** + * Computes the set of libraries to be used during TVM execution. + * + * @param cfg The configuration for the compute phase. + * + * @returns A vector of hashmaps with libraries. + */ std::vector> Transaction::compute_vm_libraries(const ComputePhaseConfig& cfg) { std::vector> lib_set; if (in_msg_library.not_null()) { @@ -1017,6 +1342,11 @@ std::vector> Transaction::compute_vm_libraries(const ComputePhaseC return lib_set; } +/** + * Checks if the input message StateInit hash corresponds to the account address. + * + * @returns True if the input message state hash is valid, False otherwise. + */ bool Transaction::check_in_msg_state_hash() { CHECK(in_msg_state.not_null()); CHECK(new_split_depth >= 0 && new_split_depth < 32); @@ -1030,6 +1360,13 @@ bool Transaction::check_in_msg_state_hash() { return account.recompute_tmp_addr(my_addr, d, orig_addr_rewrite.bits()); } +/** + * Prepares the compute phase of a transaction, which includes running TVM. + * + * @param cfg The configuration for the compute phase. + * + * @returns True if the compute phase was successfully prepared and executed, false otherwise. + */ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { // TODO: add more skip verifications + sometimes use state from in_msg to re-activate // ... @@ -1188,6 +1525,13 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { return true; } +/** + * Prepares the action phase of a transaction. + * + * @param cfg The configuration for the action phase. + * + * @returns True if the action phase was prepared successfully, false otherwise. + */ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { if (!compute_phase || !compute_phase->success) { return false; @@ -1358,6 +1702,15 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { return true; } +/** + * Tries to set the code for an account. + * + * @param cs The CellSlice containing the action data serialized as action_set_code TLB-scheme. + * @param ap The action phase object. 
+ * @param cfg The action phase configuration. + * + * @returns 0 if the code was successfully set, -1 otherwise. + */ int Transaction::try_action_set_code(vm::CellSlice& cs, ActionPhase& ap, const ActionPhaseConfig& cfg) { block::gen::OutAction::Record_action_set_code rec; if (!tlb::unpack_exact(cs, rec)) { @@ -1369,6 +1722,19 @@ int Transaction::try_action_set_code(vm::CellSlice& cs, ActionPhase& ap, const A return 0; } +/** + * Tries to change the library in the transaction. + * + * @param cs The cell slice containing the action data serialized as action_change_library TLB-scheme. + * @param ap The action phase object. + * @param cfg The action phase configuration. + * + * @returns 0 if the action was successfully performed, + * -1 if there was an error unpacking the data or the mode is invalid, + * 41 if the library reference is required but is null, + * 43 if the number of cells in the library exceeds the limit, + * 42 if there was a VM error during the operation. + */ int Transaction::try_action_change_library(vm::CellSlice& cs, ActionPhase& ap, const ActionPhaseConfig& cfg) { block::gen::OutAction::Record_action_change_library rec; if (!tlb::unpack_exact(cs, rec)) { @@ -1435,9 +1801,18 @@ int Transaction::try_action_change_library(vm::CellSlice& cs, ActionPhase& ap, c } } // namespace transaction -// msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms -// ihr_fwd_fees = ceil((msg_fwd_fees * ihr_price_factor)/2^16) nanograms -// bits in the root cell of a message are not included in msg.bits (lump_price pays for them) +/** + * Computes the forward fees for a message based on the number of cells and bits. + * + * msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms + * ihr_fwd_fees = ceil((msg_fwd_fees * ihr_price_factor)/2^16) nanograms + * bits in the root cell of a message are not included in msg.bits (lump_price pays for them) + * + * @param cells The number of cells in the message. + * @param bits The number of bits in the message. + * + * @returns The computed forward fees for the message. + */ td::uint64 MsgPrices::compute_fwd_fees(td::uint64 cells, td::uint64 bits) const { return lump_price + td::uint128(bit_price) .mult(bits) @@ -1447,6 +1822,15 @@ td::uint64 MsgPrices::compute_fwd_fees(td::uint64 cells, td::uint64 bits) const .lo(); } +/** + * Computes the forward fees and IHR fees for a message with the given number of cells and bits. + * + * @param cells The number of cells. + * @param bits The number of bits. + * @param ihr_disabled Flag indicating whether IHR is disabled. + * + * @returns A pair of values representing the forward fees and IHR fees. + */ std::pair MsgPrices::compute_fwd_ihr_fees(td::uint64 cells, td::uint64 bits, bool ihr_disabled) const { td::uint64 fwd = compute_fwd_fees(cells, bits); @@ -1456,19 +1840,47 @@ std::pair MsgPrices::compute_fwd_ihr_fees(td::uint64 cel return std::pair(fwd, td::uint128(fwd).mult(ihr_factor).shr(16).lo()); } +/** + * Computes the part of the fees that go to the total fees of the current block. + * + * @param total The amount of fees. + * + * @returns The the part of the fees that go to the total fees of the current block. + */ td::RefInt256 MsgPrices::get_first_part(td::RefInt256 total) const { return (std::move(total) * first_frac) >> 16; } +/** + * Computes the part of the fees that go to the total fees of the current block. + * + * @param total The amount of fees. 
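The forward fee formulas quoted above for MsgPrices boil down to fixed-point arithmetic with a 2^16 scale: the per-bit and per-cell prices are added on top of lump_price with ceiling rounding, the IHR fee is a 2^16-scaled multiple of the forward fee (rounded down, matching the code), and first_frac selects the share kept by the current block. A standalone sketch with plain integers; the struct name and its field types are illustrative rather than the project's MsgPrices:

#include <cstdint>
#include <utility>

struct FwdPrices {
  std::uint64_t lump_price;
  std::uint64_t bit_price;         // scaled by 2^16
  std::uint64_t cell_price;        // scaled by 2^16
  std::uint32_t ihr_price_factor;  // scaled by 2^16
  std::uint32_t first_frac;        // fraction of 2^16 kept by the current block
};

// msg_fwd_fees = lump_price + ceil((bit_price * bits + cell_price * cells) / 2^16)
std::uint64_t fwd_fee(const FwdPrices& p, std::uint64_t cells, std::uint64_t bits) {
  unsigned __int128 v = (unsigned __int128)p.bit_price * bits + (unsigned __int128)p.cell_price * cells;
  return p.lump_price + (std::uint64_t)((v + 0xffff) >> 16);
}

// ihr_fwd_fees = msg_fwd_fees * ihr_price_factor / 2^16 (zero when IHR is disabled)
std::pair<std::uint64_t, std::uint64_t> fwd_ihr_fee(const FwdPrices& p, std::uint64_t cells,
                                                    std::uint64_t bits, bool ihr_disabled) {
  std::uint64_t fwd = fwd_fee(p, cells, bits);
  std::uint64_t ihr =
      ihr_disabled ? 0 : (std::uint64_t)(((unsigned __int128)fwd * p.ihr_price_factor) >> 16);
  return {fwd, ihr};
}

// Share of a fee credited to the current block: total * first_frac / 2^16, rounded down.
std::uint64_t first_part(const FwdPrices& p, std::uint64_t total) {
  return (std::uint64_t)(((unsigned __int128)total * p.first_frac) >> 16);
}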
+ * + * @returns The the part of the fees that go to the total fees of the current block. + */ td::uint64 MsgPrices::get_first_part(td::uint64 total) const { return td::uint128(total).mult(first_frac).shr(16).lo(); } +/** + * Computes the part of the fees that go to the total fees of the transit block. + * + * @param total The amount of fees. + * + * @returns The the part of the fees that go to the total fees of the transit block. + */ td::RefInt256 MsgPrices::get_next_part(td::RefInt256 total) const { return (std::move(total) * next_frac) >> 16; } namespace transaction { +/** + * Checks if the source address is addr_none and replaces is with the account address. + * + * @param src_addr A reference to the source address of the message. + * + * @returns True if the source address is addr_none or is equal to the account address. + */ bool Transaction::check_replace_src_addr(Ref& src_addr) const { int t = (int)src_addr->prefetch_ulong(2); if (!t && src_addr->size_ext() == 2) { @@ -1489,6 +1901,15 @@ bool Transaction::check_replace_src_addr(Ref& src_addr) const { return false; } +/** + * Checks the destination address of a message, rewrites it if it is an anycast address. + * + * @param dest_addr A reference to the destination address of the transaction. + * @param cfg The configuration for the action phase. + * @param is_mc A pointer to a boolean where it will be stored whether the destination is in the masterchain. + * + * @returns True if the destination address is valid, false otherwise. + */ bool Transaction::check_rewrite_dest_addr(Ref& dest_addr, const ActionPhaseConfig& cfg, bool* is_mc) const { if (!dest_addr->prefetch_ulong(1)) { @@ -1595,6 +2016,17 @@ bool Transaction::check_rewrite_dest_addr(Ref& dest_addr, const A return true; } +/** + * Tries to send a message. + * + * @param cs0 The cell slice containing the action data serialized as action_send_msg TLB-scheme. + * @param ap The action phase. + * @param cfg The action phase configuration. + * @param redoing The index of the attempt, starting from 0. On later attempts tries to move message body and StateInit to separate cells. + * + * @returns 0 if the message is successfully sent or if the error may be ignored, error code otherwise. + * Returns -2 if the action should be attempted again. + */ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, const ActionPhaseConfig& cfg, int redoing) { block::gen::OutAction::Record_action_send_msg act_rec; @@ -1990,6 +2422,15 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, return 0; } +/** + * Tries to reserve a currency an action phase. + * + * @param cs The cell slice containing the action data serialized as action_reserve_currency TLB-scheme. + * @param ap The action phase. + * @param cfg The action phase configuration. + * + * @returns 0 if the currency is successfully reserved, error code otherwise. + */ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap, const ActionPhaseConfig& cfg) { block::gen::OutAction::Record_action_reserve_currency rec; if (!tlb::unpack_exact(cs, rec)) { @@ -2060,6 +2501,15 @@ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap, return 0; } +/** + * Checks that the new account state fits in the limits. + * + * @param cfg The configuration for the action phase. + * + * @returns A `td::Status` indicating the result of the check. + * - If the state limits are within the allowed range, returns OK. 
+ * - If the state limits exceed the maximum allowed range, returns an error. + */ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { auto cell_equal = [](const td::Ref& a, const td::Ref& b) -> bool { if (a.is_null()) { @@ -2104,6 +2554,13 @@ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { : td::Status::Error("state too big"); } +/** + * Prepares the bounce phase of a transaction. + * + * @param cfg The configuration for the action phase. + * + * @returns True if the bounce phase was successfully prepared, false otherwise. + */ bool Transaction::prepare_bounce_phase(const ActionPhaseConfig& cfg) { if (in_msg.is_null() || !bounce_enabled) { return false; @@ -2209,6 +2666,14 @@ bool Transaction::prepare_bounce_phase(const ActionPhaseConfig& cfg) { * */ +/** + * Stores the account status in a CellBuilder object. + * + * @param cb The CellBuilder object to store the account status in. + * @param acc_status The account status to store. + * + * @returns True if the account status was successfully stored, false otherwise. + */ bool Account::store_acc_status(vm::CellBuilder& cb, int acc_status) const { int v; switch (acc_status) { @@ -2231,6 +2696,17 @@ bool Account::store_acc_status(vm::CellBuilder& cb, int acc_status) const { return cb.store_long_bool(v, 2); } +/** + * Tries to update the storage statistics based on the old storage statistics and old account state without fully recomputing it. + * + * It succeeds if only root cell of AccountStorage is changed. + * + * @param old_stat The old storage statistics. + * @param old_cs The old AccountStorage. + * @param new_cell The new AccountStorage. + * + * @returns An optional value of type vm::CellStorageStat. If the update is successful, it returns the new storage statistics. Otherwise, it returns an empty optional. + */ static td::optional try_update_storage_stat(const vm::CellStorageStat& old_stat, td::Ref old_cs, td::Ref new_cell) { @@ -2258,6 +2734,11 @@ static td::optional try_update_storage_stat(const vm::CellS } namespace transaction { +/** + * Computes the new state of the account. + * + * @returns True if the state computation is successful, false otherwise. + */ bool Transaction::compute_state() { if (new_total_state.not_null()) { return true; @@ -2360,6 +2841,13 @@ bool Transaction::compute_state() { return true; } +/** + * Serializes the transaction object using Transaction TLB-scheme. + * + * Updates root. + * + * @returns True if the serialization is successful, False otherwise. + */ bool Transaction::serialize() { if (root.not_null()) { return true; @@ -2462,6 +2950,13 @@ bool Transaction::serialize() { return true; } +/** + * Serializes the storage phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the serialization is successful, false otherwise. + */ bool Transaction::serialize_storage_phase(vm::CellBuilder& cb) { if (!storage_phase) { return false; @@ -2485,6 +2980,13 @@ bool Transaction::serialize_storage_phase(vm::CellBuilder& cb) { return ok; } +/** + * Serializes the credit phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the credit phase was successfully serialized, false otherwise. 
+ */ bool Transaction::serialize_credit_phase(vm::CellBuilder& cb) { if (!credit_phase) { return false; @@ -2494,6 +2996,13 @@ bool Transaction::serialize_credit_phase(vm::CellBuilder& cb) { return block::store_Maybe_Grams_nz(cb, cp.due_fees_collected) && cp.credit.store(cb); } +/** + * Serializes the compute phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the serialization was successful, false otherwise. + */ bool Transaction::serialize_compute_phase(vm::CellBuilder& cb) { if (!compute_phase) { return false; @@ -2536,6 +3045,13 @@ bool Transaction::serialize_compute_phase(vm::CellBuilder& cb) { return ok; } +/** + * Serializes the action phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the serialization is successful, false otherwise. + */ bool Transaction::serialize_action_phase(vm::CellBuilder& cb) { if (!action_phase) { return false; @@ -2560,6 +3076,13 @@ bool Transaction::serialize_action_phase(vm::CellBuilder& cb) { return ok; } +/** + * Serializes the bounce phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the bounce phase was successfully serialized, false otherwise. + */ bool Transaction::serialize_bounce_phase(vm::CellBuilder& cb) { if (!bounce_phase) { return false; @@ -2580,6 +3103,15 @@ bool Transaction::serialize_bounce_phase(vm::CellBuilder& cb) { } } +/** + * Estimates the block storage profile increment if the transaction is added to the block. + * + * @param store_stat The current storage statistics of the block. + * @param usage_tree The usage tree of the block. + * + * @returns The estimated block storage profile increment. + * Returns Error if the transaction is not serialized or if its new state is not computed. + */ td::Result Transaction::estimate_block_storage_profile_incr( const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const { if (root.is_null()) { @@ -2591,26 +3123,14 @@ td::Result Transaction::estimate_block_storage_pro return store_stat.tentative_add_proof(new_total_state, usage_tree) + store_stat.tentative_add_cell(root); } -bool Transaction::update_block_storage_profile(vm::NewCellStorageStat& store_stat, - const vm::CellUsageTree* usage_tree) const { - if (root.is_null() || new_total_state.is_null()) { - return false; - } - store_stat.add_proof(new_total_state, usage_tree); - store_stat.add_cell(root); - return true; -} - -bool Transaction::would_fit(unsigned cls, const block::BlockLimitStatus& blimst) const { - auto res = estimate_block_storage_profile_incr(blimst.st_stat, blimst.limits.usage_tree); - if (res.is_error()) { - LOG(ERROR) << res.move_as_error(); - return false; - } - auto extra = res.move_as_ok(); - return blimst.would_fit(cls, end_lt, gas_used(), &extra); -} - +/** + * Updates the limits status of a block. + * + * @param blimst The block limit status object to update. + * @param with_size Flag indicating whether to update the size limits. + * + * @returns True if the limits were successfully updated, False otherwise. + */ bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) const { if (!(blimst.update_lt(end_lt) && blimst.update_gas(gas_used()))) { return false; @@ -2628,6 +3148,13 @@ bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) * */ +/** + * Commits a transaction for a given account. + * + * @param acc The account to commit the transaction for. 
+ * + * @returns A reference to the root cell of the serialized transaction. + */ Ref Transaction::commit(Account& acc) { CHECK(account.last_trans_end_lt_ <= start_lt && start_lt < end_lt); CHECK(root.not_null()); @@ -2672,14 +3199,33 @@ Ref Transaction::commit(Account& acc) { return root; } +/** + * Extracts the output message at the specified index from the transaction. + * + * @param i The index of the output message to extract. + * + * @returns A pair of the logical time and the extracted output message. + */ LtCellRef Transaction::extract_out_msg(unsigned i) { return {start_lt + i + 1, std::move(out_msgs.at(i))}; } +/** + * Extracts the output message at index i from the transaction. + * + * @param i The index of the output message to extract. + * + * @returns A triple of the logical time, the extracted output message and the transaction root. + */ NewOutMsg Transaction::extract_out_msg_ext(unsigned i) { return {start_lt + i + 1, std::move(out_msgs.at(i)), root}; } +/** + * Extracts the outgoing messages from the transaction and adds them to the given list. + * + * @param list The list to which the outgoing messages will be added. + */ void Transaction::extract_out_msgs(std::vector& list) { for (unsigned i = 0; i < out_msgs.size(); i++) { list.emplace_back(start_lt + i + 1, std::move(out_msgs[i])); @@ -2687,10 +3233,23 @@ void Transaction::extract_out_msgs(std::vector& list) { } } // namespace transaction +/** + * Adds a transaction to the account's transaction list. + * + * @param trans_root The root of the transaction cell. + * @param trans_lt The logical time of the transaction. + */ void Account::push_transaction(Ref trans_root, ton::LogicalTime trans_lt) { transactions.emplace_back(trans_lt, std::move(trans_root)); } +/** + * Serializes an account block for the account using AccountBlock TLB-scheme. + * + * @param cb The CellBuilder used to store the serialized data. + * + * @returns True if the account block was successfully created, false otherwise. + */ bool Account::create_account_block(vm::CellBuilder& cb) { if (transactions.empty()) { return false; @@ -2719,6 +3278,11 @@ bool Account::create_account_block(vm::CellBuilder& cb) { && cb.store_ref_bool(cb2.finalize()); // state_update:^(HASH_UPDATE Account) } +/** + * Checks if the libraries stored in the account object have changed. + * + * @returns True if the libraries have changed, False otherwise. + */ bool Account::libraries_changed() const { bool s = orig_library.not_null(); bool t = library.not_null(); @@ -2729,6 +3293,21 @@ bool Account::libraries_changed() const { } } +/** + * Fetches and initializes various configuration parameters from masterchain config for transaction processing. + * + * @param config The masterchain configuration. + * @param old_mparams Pointer to store a dictionary of mandatory parameters (ConfigParam 9). + * @param storage_prices Pointer to store the storage prices. + * @param storage_phase_cfg Pointer to store the storage phase configuration. + * @param rand_seed Pointer to the random seed. Generates a new seed if the value is `td::Bits256::zero()`. + * @param compute_phase_cfg Pointer to store the compute phase configuration. + * @param action_phase_cfg Pointer to store the action phase configuration. + * @param masterchain_create_fee Pointer to store the masterchain create fee. + * @param basechain_create_fee Pointer to store the basechain create fee. + * @param wc The workchain ID. + * @param now The current Unix time. 
+ */ td::Status FetchConfigParams::fetch_config_params( const block::ConfigInfo& config, Ref* old_mparams, std::vector* storage_prices, StoragePhaseConfig* storage_phase_cfg, td::BitArray<256>* rand_seed, ComputePhaseConfig* compute_phase_cfg, @@ -2744,6 +3323,22 @@ td::Status FetchConfigParams::fetch_config_params( basechain_create_fee, wc, now); } +/** + * Fetches and initializes various configuration parameters from masterchain config for transaction processing. + * + * @param config The masterchain configuration. + * @param prev_blocks_info The tuple with information about previous blocks. + * @param old_mparams Pointer to store a dictionary of mandatory parameters (ConfigParam 9). + * @param storage_prices Pointer to store the storage prices. + * @param storage_phase_cfg Pointer to store the storage phase configuration. + * @param rand_seed Pointer to the random seed. Generates a new seed if the value is `td::Bits256::zero()`. + * @param compute_phase_cfg Pointer to store the compute phase configuration. + * @param action_phase_cfg Pointer to store the action phase configuration. + * @param masterchain_create_fee Pointer to store the masterchain create fee. + * @param basechain_create_fee Pointer to store the basechain create fee. + * @param wc The workchain ID. + * @param now The current Unix time. + */ td::Status FetchConfigParams::fetch_config_params( const block::Config& config, td::Ref prev_blocks_info, Ref* old_mparams, std::vector* storage_prices, StoragePhaseConfig* storage_phase_cfg, diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 3ae8cdf8..6bb47fd9 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -270,7 +270,7 @@ struct Account { return balance; } bool set_address(ton::WorkchainId wc, td::ConstBitPtr new_addr); - bool unpack(Ref account, Ref extra, ton::UnixTime now, bool special = false); + bool unpack(Ref account, ton::UnixTime now, bool special); bool init_new(ton::UnixTime now); bool deactivate(); bool recompute_tmp_addr(Ref& tmp_addr, int split_depth, td::ConstBitPtr orig_addr_rewrite) const; @@ -382,8 +382,6 @@ struct Transaction { td::Result estimate_block_storage_profile_incr( const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; - bool update_block_storage_profile(vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; - bool would_fit(unsigned cls, const block::BlockLimitStatus& blk_lim_st) const; bool update_limits(block::BlockLimitStatus& blk_lim_st, bool with_size = true) const; Ref commit(Account& _account); // _account should point to the same account diff --git a/crypto/common/bitstring.cpp b/crypto/common/bitstring.cpp index c10a4ff3..5135cdf0 100644 --- a/crypto/common/bitstring.cpp +++ b/crypto/common/bitstring.cpp @@ -130,7 +130,7 @@ void bits_memcpy(unsigned char* to, int to_offs, const unsigned char* from, int from_offs &= 7; to_offs &= 7; //fprintf(stderr, "bits_memcpy: from=%p (%02x) to=%p (%02x) from_offs=%d to_offs=%d count=%lu\n", from, *from, to, *to, from_offs, to_offs, bit_count); - int sz = (int)bit_count; + int sz = static_cast(bit_count); bit_count += from_offs; if (from_offs == to_offs) { if (bit_count < 8) { @@ -206,7 +206,7 @@ void bits_memset(unsigned char* to, int to_offs, bool val, std::size_t bit_count } to += (to_offs >> 3); to_offs &= 7; - int sz = (int)bit_count; + int sz = static_cast(bit_count); bit_count += to_offs; int c = *to; if (bit_count <= 8) { diff --git a/crypto/fift/lib/Asm.fif b/crypto/fift/lib/Asm.fif index 
d9cee5d8..a0bfe642 100644 --- a/crypto/fift/lib/Asm.fif +++ b/crypto/fift/lib/Asm.fif @@ -461,19 +461,109 @@ x{B7A3} @Defop QNEGATE x{B7A4} @Defop QINC x{B7A5} @Defop QDEC x{B7A8} @Defop QMUL + x{B7A904} @Defop QDIV x{B7A905} @Defop QDIVR x{B7A906} @Defop QDIVC x{B7A908} @Defop QMOD +x{B7A909} @Defop QMODR +x{B7A90A} @Defop QMODC x{B7A90C} @Defop QDIVMOD x{B7A90D} @Defop QDIVMODR x{B7A90E} @Defop QDIVMODC x{B7A900} @Defop QADDDIVMOD x{B7A901} @Defop QADDDIVMODR x{B7A902} @Defop QADDDIVMODC + +x{B7A925} @Defop QRSHIFTR +x{B7A926} @Defop QRSHIFTC +x{B7A928} @Defop QMODPOW2 +x{B7A929} @Defop QMODPOW2R +x{B7A92A} @Defop QMODPOW2C +x{B7A92C} @Defop QRSHIFTMOD +x{B7A92D} @Defop QRSHIFTMODR +x{B7A92E} @Defop QRSHIFTMODC +x{B7A920} @Defop QADDRSHIFTMOD +x{B7A921} @Defop QADDRSHIFTMODR +x{B7A922} @Defop QADDRSHIFTMODC + +x{B7A935} @Defop(8u+1) QRSHIFTR# +x{B7A936} @Defop(8u+1) QRSHIFTC# +x{B7A938} @Defop(8u+1) QMODPOW2# +x{B7A939} @Defop(8u+1) QMODPOW2R# +x{B7A93A} @Defop(8u+1) QMODPOW2C# +x{B7A93C} @Defop(8u+1) QRSHIFT#MOD +x{B7A93D} @Defop(8u+1) QRSHIFTR#MOD +x{B7A93E} @Defop(8u+1) QRSHIFTC#MOD +x{B7A930} @Defop(8u+1) QADDRSHIFT#MOD +x{B7A931} @Defop(8u+1) QADDRSHIFTR#MOD +x{B7A932} @Defop(8u+1) QADDRSHIFTC#MOD + +x{B7A984} @Defop QMULDIV x{B7A985} @Defop QMULDIVR +x{B7A986} @Defop QMULDIVC +x{B7A988} @Defop QMULMOD +x{B7A989} @Defop QMULMODR +x{B7A98A} @Defop QMULMODC x{B7A98C} @Defop QMULDIVMOD -x{B7A980} @Defop QADDMULDIVMOD +x{B7A98D} @Defop QMULDIVMODR +x{B7A98E} @Defop QMULDIVMODC +x{B7A980} @Defop QMULADDDIVMOD +x{B7A981} @Defop QMULADDDIVMODR +x{B7A982} @Defop QMULADDDIVMODC + +x{B7A9A4} @Defop QMULRSHIFT +x{B7A9A5} @Defop QMULRSHIFTR +x{B7A9A6} @Defop QMULRSHIFTC +x{B7A9A8} @Defop QMULMODPOW2 +x{B7A9A9} @Defop QMULMODPOW2R +x{B7A9AA} @Defop QMULMODPOW2C +x{B7A9AC} @Defop QMULRSHIFTMOD +x{B7A9AD} @Defop QMULRSHIFTRMOD +x{B7A9AE} @Defop QMULRSHIFTCMOD +x{B7A9A0} @Defop QMULADDRSHIFTMOD +x{B7A9A1} @Defop QMULADDRSHIFTRMOD +x{B7A9A2} @Defop QMULADDRSHIFTCMOD + +x{B7A9B4} @Defop(8u+1) QMULRSHIFT# +x{B7A9B5} @Defop(8u+1) QMULRSHIFTR# +x{B7A9B6} @Defop(8u+1) QMULRSHIFTC# +x{B7A9B8} @Defop(8u+1) QMULMODPOW2# +x{B7A9B9} @Defop(8u+1) QMULMODPOW2R# +x{B7A9BA} @Defop(8u+1) QMULMODPOW2C# +x{B7A9BC} @Defop(8u+1) QMULRSHIFT#MOD +x{B7A9BD} @Defop(8u+1) QMULRSHIFTR#MOD +x{B7A9BE} @Defop(8u+1) QMULRSHIFTC#MOD +x{B7A9B0} @Defop(8u+1) QMULADDRSHIFT#MOD +x{B7A9B1} @Defop(8u+1) QMULADDRSHIFTR#MOD +x{B7A9B2} @Defop(8u+1) QMULADDRSHIFTC#MOD + +x{B7A9C4} @Defop QLSHIFTDIV +x{B7A9C5} @Defop QLSHIFTDIVR +x{B7A9C6} @Defop QLSHIFTDIVC +x{B7A9C8} @Defop QLSHIFTMOD +x{B7A9C9} @Defop QLSHIFTMODR +x{B7A9CA} @Defop QLSHIFTMODC +x{B7A9CC} @Defop QLSHIFTDIVMOD +x{B7A9CD} @Defop QLSHIFTDIVMODR +x{B7A9CE} @Defop QLSHIFTDIVMODC +x{B7A9C0} @Defop QLSHIFTADDDIVMOD +x{B7A9C1} @Defop QLSHIFTADDDIVMODR +x{B7A9C2} @Defop QLSHIFTADDDIVMODC + +x{B7A9D4} @Defop(8u+1) QLSHIFT#DIV +x{B7A9D5} @Defop(8u+1) QLSHIFT#DIVR +x{B7A9D6} @Defop(8u+1) QLSHIFT#DIVC +x{B7A9D8} @Defop(8u+1) QLSHIFT#MOD +x{B7A9D9} @Defop(8u+1) QLSHIFT#MODR +x{B7A9DA} @Defop(8u+1) QLSHIFT#MODC +x{B7A9DC} @Defop(8u+1) QLSHIFT#DIVMOD +x{B7A9DD} @Defop(8u+1) QLSHIFT#DIVMODR +x{B7A9DE} @Defop(8u+1) QLSHIFT#DIVMODC +x{B7A9D0} @Defop(8u+1) QLSHIFT#ADDDIVMOD +x{B7A9D1} @Defop(8u+1) QLSHIFT#ADDDIVMODR +x{B7A9D2} @Defop(8u+1) QLSHIFT#ADDDIVMODC + x{B7AC} @Defop QLSHIFT x{B7AD} @Defop QRSHIFT x{B7AE} @Defop QPOW2 @@ -1185,7 +1275,7 @@ x{F4BF} @Defop DICTUGETEXECZ x{F800} @Defop ACCEPT x{F801} @Defop SETGASLIMIT -x{F802} @Defop GASCONSUMED +x{F806} @Defop GASCONSUMED x{F80F} 
@Defop COMMIT x{F810} @Defop RANDU256 diff --git a/crypto/fift/words.cpp b/crypto/fift/words.cpp index 56dd4de2..8d652afc 100644 --- a/crypto/fift/words.cpp +++ b/crypto/fift/words.cpp @@ -3512,7 +3512,7 @@ void init_words_ton(Dictionary& d) { void init_words_vm(Dictionary& d, bool enable_debug) { using namespace std::placeholders; - vm::init_op_cp0(enable_debug); + vm::init_vm(enable_debug).ensure(); // vm run d.def_word("vmlibs ", LitCont::literal(vm_libraries)); // d.def_ctx_word("runvmcode ", std::bind(interpret_run_vm, _1, 0x40)); diff --git a/crypto/func/auto-tests/legacy_tester.py b/crypto/func/auto-tests/legacy_tester.py index e852a043..9a990501 100644 --- a/crypto/func/auto-tests/legacy_tester.py +++ b/crypto/func/auto-tests/legacy_tester.py @@ -8,37 +8,37 @@ import shutil add_pragmas = [] #["allow-post-modification", "compute-asm-ltr"]; tests = [ - # note, that deployed version of elector,config and multisig differ since it is compilled with func-0.1.0. - # Newer compillators optimize arithmetic and logic expression that can be calculated at the compile time - ["elector/elector-code.fc", 115226404411715505328583639896096915745686314074575650766750648324043316883483], - ["config/config-code.fc", 10913070768607625342121305745084703121685937915388357634624451844356456145601], - ["eth-bridge-multisig/multisig-code.fc", 101509909129354488841890823627011033360100627957439967918234053299675481277954], + # note, that deployed version of elector,config and multisig differ since it is compilled with func-0.1.0. + # Newer compillators optimize arithmetic and logic expression that can be calculated at the compile time + ["elector/elector-code.fc", 115226404411715505328583639896096915745686314074575650766750648324043316883483], + ["config/config-code.fc", 10913070768607625342121305745084703121685937915388357634624451844356456145601], + ["eth-bridge-multisig/multisig-code.fc", 101509909129354488841890823627011033360100627957439967918234053299675481277954], - ["bsc-bridge-collector/votes-collector.fc", 62190447221288642706570413295807615918589884489514159926097051017036969900417], - ["uni-lock-wallet/uni-lockup-wallet.fc", 61959738324779104851267145467044677651344601417998258530238254441977103654381], - ["nft-collection/nft-collection-editable.fc", 45561997735512210616567774035540357815786262097548276229169737015839077731274], - ["dns-collection/nft-collection.fc", 107999822699841936063083742021519765435859194241091312445235370766165379261859], + ["bsc-bridge-collector/votes-collector.fc", 62190447221288642706570413295807615918589884489514159926097051017036969900417], + ["uni-lock-wallet/uni-lockup-wallet.fc", 61959738324779104851267145467044677651344601417998258530238254441977103654381], + ["nft-collection/nft-collection-editable.fc", 45561997735512210616567774035540357815786262097548276229169737015839077731274], + ["dns-collection/nft-collection.fc", 107999822699841936063083742021519765435859194241091312445235370766165379261859], - # note, that deployed version of tele-nft-item differs since it is compilled with func-0.3.0. - # After introducing of try/catch construction, c2 register is not always the default one. - # Thus it is necessary to save it upon jumps, differences of deployed and below compilled is that - # "c2 SAVE" is added to the beginning of recv_internal. It does not change behavior. 
- ["tele-nft-item/nft-item.fc", 69777543125381987786450436977742010705076866061362104025338034583422166453344], + # note, that deployed version of tele-nft-item differs since it is compilled with func-0.3.0. + # After introducing of try/catch construction, c2 register is not always the default one. + # Thus it is necessary to save it upon jumps, differences of deployed and below compilled is that + # "c2 SAVE" is added to the beginning of recv_internal. It does not change behavior. + ["tele-nft-item/nft-item.fc", 69777543125381987786450436977742010705076866061362104025338034583422166453344], - ["storage/storage-contract.fc", 91377830060355733016937375216020277778264560226873154627574229667513068328151], - ["storage/storage-provider.fc", 13618336676213331164384407184540461509022654507176709588621016553953760588122], - ["nominator-pool/pool.fc", 69767057279163099864792356875696330339149706521019810113334238732928422055375], - ["jetton-minter/jetton-minter.fc", 9028309926287301331466371999814928201427184114165428257502393474125007156494], - ["gg-marketplace/nft-marketplace-v2.fc", 92199806964112524639740773542356508485601908152150843819273107618799016205930], - ["jetton-wallet/jetton-wallet.fc", 86251125787443633057458168028617933212663498001665054651523310772884328206542], - ["whales-nominators/nominators.fc", 8941364499854379927692172316865293429893094891593442801401542636695127885153], + ["storage/storage-contract.fc", 91377830060355733016937375216020277778264560226873154627574229667513068328151], + ["storage/storage-provider.fc", 13618336676213331164384407184540461509022654507176709588621016553953760588122], + ["nominator-pool/pool.fc", 69767057279163099864792356875696330339149706521019810113334238732928422055375], + ["jetton-minter/jetton-minter.fc", 9028309926287301331466371999814928201427184114165428257502393474125007156494], + ["gg-marketplace/nft-marketplace-v2.fc", 92199806964112524639740773542356508485601908152150843819273107618799016205930], + ["jetton-wallet/jetton-wallet.fc", 86251125787443633057458168028617933212663498001665054651523310772884328206542], + ["whales-nominators/nominators.fc", 8941364499854379927692172316865293429893094891593442801401542636695127885153], - ["tact-examples/treasure_Treasure.code.fc", 13962538639825790677138656603323869918938565499584297120566680287245364723897], - ["tact-examples/jetton_SampleJetton.code.fc", 94076762218493729104783735200107713211245710256802265203823917715299139499110], - ["tact-examples/jetton_JettonDefaultWallet.code.fc", 29421313492520031238091587108198906058157443241743283101866538036369069620563], - ["tact-examples/maps_MapTestContract.code.fc", 22556550222249123835909180266811414538971143565993192846012583552876721649744], + ["tact-examples/treasure_Treasure.code.fc", 13962538639825790677138656603323869918938565499584297120566680287245364723897], + ["tact-examples/jetton_SampleJetton.code.fc", 94076762218493729104783735200107713211245710256802265203823917715299139499110], + ["tact-examples/jetton_JettonDefaultWallet.code.fc", 29421313492520031238091587108198906058157443241743283101866538036369069620563], + ["tact-examples/maps_MapTestContract.code.fc", 22556550222249123835909180266811414538971143565993192846012583552876721649744], ] def getenv(name, default=None): @@ -51,7 +51,6 @@ def getenv(name, default=None): FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func") FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift") -FIFT_LIBS = getenv("FIFTPATH") TMP_DIR = tempfile.mkdtemp() COMPILED_FIF = os.path.join(TMP_DIR, 
"compiled.fif") @@ -63,49 +62,49 @@ class ExecutionError(Exception): pass def pre_process_func(f): - shutil.copyfile(f, f+"_backup") - with open(f, "r") as src: - sources = src.read() - with open(f, "w") as src: - for pragma in add_pragmas: - src.write("#pragma %s;\n"%pragma) - src.write(sources) + shutil.copyfile(f, f+"_backup") + with open(f, "r") as src: + sources = src.read() + with open(f, "w") as src: + for pragma in add_pragmas: + src.write("#pragma %s;\n"%pragma) + src.write(sources) def post_process_func(f): - shutil.move(f+"_backup", f) + shutil.move(f+"_backup", f) def compile_func(f): res = None try: pre_process_func(f) if "storage-provider.fc" in f : - # This contract requires building of storage-contract to include it as ref - with open(f, "r") as src: - sources = src.read() + # This contract requires building of storage-contract to include it as ref + with open(f, "r") as src: + sources = src.read() + COMPILED_ST_BOC = os.path.join(TMP_DIR, "storage-contract-code.boc") + sources = sources.replace("storage-contract-code.boc", COMPILED_ST_BOC) + with open(f, "w") as src: + src.write(sources) + COMPILED_ST_FIF = os.path.join(TMP_DIR, "storage-contract.fif") COMPILED_ST_BOC = os.path.join(TMP_DIR, "storage-contract-code.boc") - sources = sources.replace("storage-contract-code.boc", COMPILED_ST_BOC) - with open(f, "w") as src: - src.write(sources) - COMPILED_ST_FIF = os.path.join(TMP_DIR, "storage-contract.fif") - COMPILED_ST_BOC = os.path.join(TMP_DIR, "storage-contract-code.boc") - COMPILED_BUILD_BOC = os.path.join(TMP_DIR, "build-boc.fif") - res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_ST_FIF, "-SPA", f.replace("storage-provider.fc","storage-contract.fc")], capture_output=False, timeout=10) - with open(COMPILED_BUILD_BOC, "w") as scr: - scr.write("\"%s\" include boc>B \"%s\" B>file "%(COMPILED_ST_FIF, COMPILED_ST_BOC)) - res = subprocess.run([FIFT_EXECUTABLE, COMPILED_BUILD_BOC ], capture_output=True, timeout=10) - - + COMPILED_BUILD_BOC = os.path.join(TMP_DIR, "build-boc.fif") + res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_ST_FIF, "-SPA", f.replace("storage-provider.fc","storage-contract.fc")], capture_output=False, timeout=10) + with open(COMPILED_BUILD_BOC, "w") as scr: + scr.write("\"%s\" include boc>B \"%s\" B>file "%(COMPILED_ST_FIF, COMPILED_ST_BOC)) + res = subprocess.run([FIFT_EXECUTABLE, COMPILED_BUILD_BOC ], capture_output=True, timeout=10) + + res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_FIF, "-SPA", f], capture_output=True, timeout=10) except Exception as e: - post_process_func(f) - raise e + post_process_func(f) + raise e else: - post_process_func(f) + post_process_func(f) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) def run_runner(): - res = subprocess.run([FIFT_EXECUTABLE, "-I", FIFT_LIBS, RUNNER_FIF], capture_output=True, timeout=10) + res = subprocess.run([FIFT_EXECUTABLE, RUNNER_FIF], capture_output=True, timeout=10) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) s = str(res.stdout, "utf-8") @@ -138,16 +137,15 @@ for ti, t in enumerate(tests): try: func_out = run_runner() if func_out != th: - raise ExecutionError("Error : expected '%d', found '%d'" % (th, func_out)) + raise ExecutionError("Error : expected '%d', found '%d'" % (th, func_out)) success += 1 except ExecutionError as e: print(e, file=sys.stderr) - #print("Compiled:", file=sys.stderr) - #with open(COMPILED_FIF, "r") as f: - # print(f.read(), file=sys.stderr) - #exit(2) + print("Compiled:", file=sys.stderr) + with 
open(COMPILED_FIF, "r") as f: + print(f.read(), file=sys.stderr) + exit(2) print(" OK ", file=sys.stderr) print(get_version()) -print("Done: Success %d, Error: %d"%(success, len(tests)-success), file=sys.stderr) - +print("Done: Success %d, Error: %d"%(success, len(tests)-success), file=sys.stderr) \ No newline at end of file diff --git a/crypto/func/auto-tests/run_tests.py b/crypto/func/auto-tests/run_tests.py index 0f12332d..158e871b 100644 --- a/crypto/func/auto-tests/run_tests.py +++ b/crypto/func/auto-tests/run_tests.py @@ -4,6 +4,7 @@ import subprocess import sys import tempfile + def getenv(name, default=None): if name in os.environ: return os.environ[name] @@ -12,10 +13,9 @@ def getenv(name, default=None): exit(1) return default + FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func") FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift") -#FUNC_STDLIB = getenv("FUNC_STDLIB") -FIFT_LIBS = getenv("FIFT_LIBS") TMP_DIR = tempfile.mkdtemp() COMPILED_FIF = os.path.join(TMP_DIR, "compiled.fif") RUNNER_FIF = os.path.join(TMP_DIR, "runner.fif") @@ -25,22 +25,26 @@ if len(sys.argv) != 2: exit(1) TESTS_DIR = sys.argv[1] + class ExecutionError(Exception): pass + def compile_func(f): res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_FIF, "-SPA", f], capture_output=True, timeout=10) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) + def run_runner(): - res = subprocess.run([FIFT_EXECUTABLE, "-I", FIFT_LIBS, RUNNER_FIF], capture_output=True, timeout=10) + res = subprocess.run([FIFT_EXECUTABLE, RUNNER_FIF], capture_output=True, timeout=10) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) s = str(res.stdout, "utf-8") s = [x.strip() for x in s.split("\n")] return [x for x in s if x != ""] + tests = [s for s in os.listdir(TESTS_DIR) if s.endswith(".fc")] tests.sort() print("Found", len(tests), "tests", file=sys.stderr) @@ -68,18 +72,18 @@ for ti, tf in enumerate(tests): # preprocess arithmetics in input for i in range(len(cases)): - inputs = cases[i][1].split(" ") - processed_inputs = "" - for in_arg in inputs: - if "x{" in in_arg: - processed_inputs += in_arg - continue - # filter and execute - # is it safe enough? - filtered_in = "".join(filter(lambda x: x in "0x123456789()+-*/<>", in_arg)) - if(filtered_in): - processed_inputs += str(eval(filtered_in)) + " "; - cases[i][1] = processed_inputs.strip() + inputs = cases[i][1].split(" ") + processed_inputs = "" + for in_arg in inputs: + if "x{" in in_arg: + processed_inputs += in_arg + continue + # filter and execute + # is it safe enough? 
+ filtered_in = "".join(filter(lambda x: x in "0x123456789()+-*/<>", in_arg)) + if filtered_in: + processed_inputs += str(eval(filtered_in)) + " " + cases[i][1] = processed_inputs.strip() with open(RUNNER_FIF, "w") as f: print("\"%s\" include >(std::istream& is, Bignum& x) { return is; } -bool is_prime(const Bignum& p, int nchecks, bool trial_div) { - return BN_is_prime_fasttest_ex(p.bn_ptr(), BN_prime_checks, get_ctx(), trial_div, 0); +bool is_prime(const Bignum& p) { +#if OPENSSL_VERSION_MAJOR >= 3 + int result = BN_check_prime(p.bn_ptr(), get_ctx(), nullptr); + LOG_IF(FATAL, result == -1); + return result; +#else + return BN_is_prime_fasttest_ex(p.bn_ptr(), BN_prime_checks, get_ctx(), true, 0); +#endif } } // namespace arith diff --git a/crypto/openssl/bignum.h b/crypto/openssl/bignum.h index 2a8dd8a0..032dbb02 100644 --- a/crypto/openssl/bignum.h +++ b/crypto/openssl/bignum.h @@ -335,7 +335,7 @@ const Bignum sqr(const Bignum& x); std::ostream& operator<<(std::ostream& os, const Bignum& x); std::istream& operator>>(std::istream& is, Bignum& x); -bool is_prime(const Bignum& p, int nchecks = 64, bool trial_div = true); +bool is_prime(const Bignum& p); inline int cmp(const Bignum& x, const Bignum& y) { return BN_cmp(x.bn_ptr(), y.bn_ptr()); diff --git a/crypto/parser/lexer.cpp b/crypto/parser/lexer.cpp index 624d8dd2..117f1df5 100644 --- a/crypto/parser/lexer.cpp +++ b/crypto/parser/lexer.cpp @@ -250,7 +250,6 @@ const Lexem& Lexer::next() { } if (is_multiline_quote(src.get_ptr(), src.get_end_ptr())) { src.advance(multiline_quote.size()); - const char* begin = src.get_ptr(); const char* end = nullptr; SrcLocation here = src.here(); std::string body; diff --git a/crypto/smc-envelope/ManualDns.h b/crypto/smc-envelope/ManualDns.h index b5dee59a..d24cd023 100644 --- a/crypto/smc-envelope/ManualDns.h +++ b/crypto/smc-envelope/ManualDns.h @@ -305,7 +305,7 @@ class ManualDns : public ton::SmartContract, public DnsInterface { if (!info.known_category.insert(action.category).second) { continue; } - if (action.category == 0) { + if (action.category.is_zero()) { info.closed = true; auto old_actions = std::move(info.actions); bool is_empty = true; @@ -327,7 +327,7 @@ class ManualDns : public ton::SmartContract, public DnsInterface { if (info.closed) { CombinedActions ca; ca.name = it.first; - ca.category = 0; + ca.category = td::Bits256::zero(); if (!info.actions.empty() || info.non_empty) { ca.actions = std::move(info.actions); } diff --git a/crypto/smc-envelope/SmartContract.cpp b/crypto/smc-envelope/SmartContract.cpp index 64e26944..b56810b0 100644 --- a/crypto/smc-envelope/SmartContract.cpp +++ b/crypto/smc-envelope/SmartContract.cpp @@ -181,7 +181,7 @@ SmartContract::Answer run_smartcont(SmartContract::State state, td::Ref config) { auto gas_credit = gas.gas_credit; - vm::init_op_cp0(debug_enabled); + vm::init_vm(debug_enabled).ensure(); vm::DictionaryBase::get_empty_dictionary(); class Logger : public td::LogInterface { diff --git a/crypto/test/fift.cpp b/crypto/test/fift.cpp index 049507fd..2b1ed54e 100644 --- a/crypto/test/fift.cpp +++ b/crypto/test/fift.cpp @@ -33,7 +33,7 @@ std::string load_test(std::string name) { return td::read_file_str(current_dir() + "fift/" + name).move_as_ok(); } -td::Status run_fift(std::string name, bool expect_error = false, bool preload_fift = true) { +td::Status run_fift(std::string name, bool expect_error = false) { auto res = fift::mem_run_fift(load_test(name)); if (expect_error) { res.ensure_error(); diff --git a/crypto/test/fift/rist255.fif 
b/crypto/test/fift/rist255.fif index 630eebb1..ddd9b619 100644 --- a/crypto/test/fift/rist255.fif +++ b/crypto/test/fift/rist255.fif @@ -9,6 +9,9 @@ [[ <{ RIST255_MULBASE DUP RIST255_VALIDATE }>s ]] 0 runvmx abort"Exitcode != 0" @' n . dup (x.) type cr @' ans <> abort"Invalid result" + @' n + [[ <{ 1 INT RIST255_MULBASE SWAP RIST255_MUL DUP RIST255_VALIDATE }>s ]] 0 runvmx abort"Exitcode != 0" + @' ans <> abort"Invalid result" } : test-basepoint 0 0x0000000000000000000000000000000000000000000000000000000000000000 test-basepoint diff --git a/crypto/test/modbigint.cpp b/crypto/test/modbigint.cpp index b34411f3..75051fa6 100644 --- a/crypto/test/modbigint.cpp +++ b/crypto/test/modbigint.cpp @@ -180,7 +180,7 @@ struct MixedRadix { template const MixedRadix& as_shorter() const { - static_assert(M <= N); + static_assert(M <= N,"error"); return *reinterpret_cast*>(this); } @@ -458,7 +458,7 @@ struct ModArray { } template ModArray(const ModArray& other) { - static_assert(M >= N); + static_assert(M >= N,"error"); std::copy(other.a, other.a + N, a); } ModArray(const int* p) : a(p) { @@ -819,7 +819,7 @@ struct ModArray { template const ModArray& as_shorter() const { - static_assert(M <= N); + static_assert(M <= N,"error"); return *reinterpret_cast*>(this); } diff --git a/crypto/test/test-bigint.cpp b/crypto/test/test-bigint.cpp index 7525c83a..a6f6e8d6 100644 --- a/crypto/test/test-bigint.cpp +++ b/crypto/test/test-bigint.cpp @@ -16,12 +16,12 @@ */ #include #include -#include #include #include #include #include #include +#include #include "common/refcnt.hpp" #include "common/bigint.hpp" #include "common/refint.h" @@ -211,7 +211,7 @@ bool coin() { // returns 0 with probability 1/2, 1 with prob. 1/4, ..., k with prob. 1/2^(k+1) int randexp(int max = 63, int min = 0) { - return min + __builtin_clzll(Random() | (1ULL << (63 - max + min))); + return min + td::count_leading_zeroes64(Random() | (1ULL << (63 - max + min))); } void bin_add_small(unsigned char bin[64], long long val, int shift = 0) { @@ -363,7 +363,7 @@ void check_one_int_repr(td::RefInt256 x, int mode, int in_range, const BInt* val if (is_small) { // special check for small (64-bit) values CHECK(x->to_long() == xval); - CHECK((long long)__builtin_bswap64(*(long long*)(bytes + 64 - 8)) == xval); + CHECK((long long)td::bswap64(*(long long*)(bytes + 64 - 8)) == xval); CHECK(in_range); // check sign CHECK(x->sgn() == (xval > 0 ? 1 : (xval < 0 ? 
-1 : 0))); diff --git a/crypto/test/test-db.cpp b/crypto/test/test-db.cpp index 413d774f..35727ee3 100644 --- a/crypto/test/test-db.cpp +++ b/crypto/test/test-db.cpp @@ -127,12 +127,12 @@ class BenchSha256Low : public td::Benchmark { void run(int n) override { int res = 0; - SHA256_CTX ctx; + td::Sha256State ctx; for (int i = 0; i < n; i++) { - SHA256_Init(&ctx); - SHA256_Update(&ctx, "abcd", 4); + ctx.init(); + ctx.feed("abcd"); unsigned char buf[32]; - SHA256_Final(buf, &ctx); + ctx.extract(td::MutableSlice{buf, 32}); res += buf[0]; } td::do_not_optimize_away(res); diff --git a/crypto/test/test-smartcont.cpp b/crypto/test/test-smartcont.cpp index d3f18813..98534bc5 100644 --- a/crypto/test/test-smartcont.cpp +++ b/crypto/test/test-smartcont.cpp @@ -958,7 +958,7 @@ class MapDns { } return; } - if (!actions.category.is_zero()) { + if (actions.category.is_zero()) { entries_.erase(actions.name); LOG(ERROR) << "CLEAR " << actions.name; if (!actions.actions) { @@ -1003,7 +1003,7 @@ class CheckedDns { explicit CheckedDns(bool check_smc = true, bool check_combine = true) { if (check_smc) { key_ = td::Ed25519::generate_private_key().move_as_ok(); - dns_ = ManualDns::create(ManualDns::create_init_data_fast(key_.value().get_public_key().move_as_ok(), 123)); + dns_ = ManualDns::create(ManualDns::create_init_data_fast(key_.value().get_public_key().move_as_ok(), 123), -1); } if (check_combine) { combined_map_dns_ = MapDns(); @@ -1094,9 +1094,10 @@ class CheckedDns { } }; -static td::Bits256 intToCat(int x) { - td::Bits256 cat = td::Bits256::zero(); - cat.as_slice().copy_from(td::Slice((char*)&x, sizeof(x))); +static td::Bits256 intToCat(td::uint32 x) { + auto y = td::make_refint(x); + td::Bits256 cat; + y->export_bytes(cat.data(), 32, false); return cat; } @@ -1182,7 +1183,7 @@ TEST(Smartcont, DnsManual) { auto key = td::Ed25519::generate_private_key().move_as_ok(); - auto manual = ManualDns::create(ManualDns::create_init_data_fast(key.get_public_key().move_as_ok(), 123)); + auto manual = ManualDns::create(ManualDns::create_init_data_fast(key.get_public_key().move_as_ok(), 123), -1); CHECK(manual->get_wallet_id().move_as_ok() == 123); auto init_query = manual->create_init_query(key).move_as_ok(); LOG(ERROR) << "A"; diff --git a/crypto/test/vm.cpp b/crypto/test/vm.cpp index 3227f8fa..0f1b0442 100644 --- a/crypto/test/vm.cpp +++ b/crypto/test/vm.cpp @@ -28,7 +28,7 @@ #include "td/utils/StringBuilder.h" std::string run_vm(td::Ref cell) { - vm::init_op_cp0(); + vm::init_vm().ensure(); vm::DictionaryBase::get_empty_dictionary(); class Logger : public td::LogInterface { diff --git a/crypto/tl/tlbc-gen-cpp.cpp b/crypto/tl/tlbc-gen-cpp.cpp index dedec15d..6edd0a12 100644 --- a/crypto/tl/tlbc-gen-cpp.cpp +++ b/crypto/tl/tlbc-gen-cpp.cpp @@ -159,7 +159,6 @@ std::string CppIdentSet::compute_cpp_ident(std::string orig_ident, int count) { } if (!cnt) { os << '_'; - prev_skip = true; } if (count) { os << count; diff --git a/crypto/tl/tlbc.cpp b/crypto/tl/tlbc.cpp index 409ac538..b48bc472 100644 --- a/crypto/tl/tlbc.cpp +++ b/crypto/tl/tlbc.cpp @@ -2252,11 +2252,9 @@ TypeExpr* parse_expr10(Lexer& lex, Constructor& cs, int mode) { } if (op == '>') { std::swap(expr, expr2); - op = '<'; op_name = Less_name; } else if (op == src::_Geq) { std::swap(expr, expr2); - op = src::_Leq; op_name = Leq_name; } auto sym_def = sym::lookup_symbol(op_name, 2); diff --git a/crypto/vm/arithops.cpp b/crypto/vm/arithops.cpp index 7da53567..1d3111b2 100644 --- a/crypto/vm/arithops.cpp +++ b/crypto/vm/arithops.cpp @@ -285,8 +285,11 @@ int 
exec_divmod(VmState* st, unsigned args, int quiet) { typename td::BigInt256::DoubleInt tmp{*x}, quot; tmp += *w; tmp.mod_div(*y, quot, round_mode); - stack.push_int_quiet(td::make_refint(quot), quiet); - stack.push_int_quiet(td::make_refint(tmp), quiet); + auto q = td::make_refint(quot), r = td::make_refint(tmp); + q.write().normalize(); + r.write().normalize(); + stack.push_int_quiet(std::move(q), quiet); + stack.push_int_quiet(std::move(r), quiet); } else { switch (d) { case 1: @@ -399,6 +402,7 @@ std::string dump_shrmod(CellSlice&, unsigned args, int mode) { if (mode & 1) { os << 'Q'; } + std::string end; switch (args & 12) { case 4: os << "RSHIFT"; @@ -407,17 +411,22 @@ std::string dump_shrmod(CellSlice&, unsigned args, int mode) { os << "MODPOW2"; break; case 12: - os << "RSHIFTMOD"; + os << "RSHIFT"; + end = "MOD"; break; case 0: - os << "ADDRSHIFTMOD"; + os << "ADDRSHIFT"; + end = "MOD"; break; } + if (!(mode & 2)) { + os << end; + } if (round_mode) { os << "FRC"[round_mode]; } if (mode & 2) { - os << ' ' << y; + os << "#" << end << ' ' << y; } return os.str(); } @@ -519,7 +528,7 @@ int exec_mulshrmod(VmState* st, unsigned args, int mode) { if (add) { tmp = *w; } - tmp.add_mul(*x, *y); + tmp.add_mul(*x, *y).normalize(); switch (d) { case 1: tmp.rshift(z, round_mode).normalize(); @@ -553,6 +562,7 @@ std::string dump_mulshrmod(CellSlice&, unsigned args, int mode) { if (mode & 1) { os << 'Q'; } + std::string end; switch (args & 12) { case 4: os << "MULRSHIFT"; @@ -561,15 +571,21 @@ std::string dump_mulshrmod(CellSlice&, unsigned args, int mode) { os << "MULMODPOW2"; break; case 12: - os << "MULRSHIFTMOD"; + os << "MULRSHIFT"; + end = "MOD"; break; case 0: - os << "MULADDRSHIFTMOD"; + os << "MULADDRSHIFT"; + end = "MOD"; break; } if (round_mode) { os << "FRC"[round_mode]; } + if (mode & 2) { + os << "#"; + } + os << end; if (mode & 2) { os << ' ' << y; } @@ -644,18 +660,22 @@ std::string dump_shldivmod(CellSlice&, unsigned args, int mode) { if (mode & 1) { os << "Q"; } + os << "LSHIFT"; + if (mode & 2) { + os << "#"; + } switch (args & 12) { case 4: - os << "LSHIFTDIV"; + os << "DIV"; break; case 8: - os << "LSHIFTMOD"; + os << "MOD"; break; case 12: - os << "LSHIFTDIVMOD"; + os << "DIVMOD"; break; case 0: - os << "LSHIFTADDDIVMOD"; + os << "ADDDIVMOD"; break; } if (round_mode) { diff --git a/crypto/vm/tonops.cpp b/crypto/vm/tonops.cpp index 0dd15d80..f491d252 100644 --- a/crypto/vm/tonops.cpp +++ b/crypto/vm/tonops.cpp @@ -101,7 +101,7 @@ void register_basic_gas_ops(OpcodeTable& cp0) { using namespace std::placeholders; cp0.insert(OpcodeInstr::mksimple(0xf800, 16, "ACCEPT", exec_accept)) .insert(OpcodeInstr::mksimple(0xf801, 16, "SETGASLIMIT", exec_set_gas_limit)) - .insert(OpcodeInstr::mksimple(0xf802, 16, "GASCONSUMED", exec_gas_consumed)->require_version(4)) + .insert(OpcodeInstr::mksimple(0xf806, 16, "GASCONSUMED", exec_gas_consumed)->require_version(4)) .insert(OpcodeInstr::mksimple(0xf80f, 16, "COMMIT", exec_commit)); } @@ -620,7 +620,6 @@ int exec_ristretto255_from_hash(VmState* st) { if (!x2->export_bytes(xb + 32, 32, false)) { throw VmError{Excno::range_chk, "x2 must fit in an unsigned 256-bit integer"}; } - CHECK(sodium_init() >= 0); crypto_core_ristretto255_from_hash(rb, xb); td::RefInt256 r{true}; CHECK(r.write().import_bytes(rb, 32, false)); @@ -633,8 +632,7 @@ int exec_ristretto255_validate(VmState* st, bool quiet) { Stack& stack = st->get_stack(); auto x = stack.pop_int(); st->consume_gas(VmState::rist255_validate_gas_price); - unsigned char xb[64]; - CHECK(sodium_init() 
>= 0); + unsigned char xb[32]; if (!x->export_bytes(xb, 32, false) || !crypto_core_ristretto255_is_valid_point(xb)) { if (quiet) { stack.push_bool(false); @@ -656,7 +654,6 @@ int exec_ristretto255_add(VmState* st, bool quiet) { auto x = stack.pop_int(); st->consume_gas(VmState::rist255_add_gas_price); unsigned char xb[32], yb[32], rb[32]; - CHECK(sodium_init() >= 0); if (!x->export_bytes(xb, 32, false) || !y->export_bytes(yb, 32, false) || crypto_core_ristretto255_add(rb, xb, yb)) { if (quiet) { stack.push_bool(false); @@ -681,7 +678,6 @@ int exec_ristretto255_sub(VmState* st, bool quiet) { auto x = stack.pop_int(); st->consume_gas(VmState::rist255_add_gas_price); unsigned char xb[32], yb[32], rb[32]; - CHECK(sodium_init() >= 0); if (!x->export_bytes(xb, 32, false) || !y->export_bytes(yb, 32, false) || crypto_core_ristretto255_sub(rb, xb, yb)) { if (quiet) { stack.push_bool(false); @@ -719,17 +715,20 @@ int exec_ristretto255_mul(VmState* st, bool quiet) { auto n = stack.pop_int() % get_ristretto256_l(); auto x = stack.pop_int(); st->consume_gas(VmState::rist255_mul_gas_price); - unsigned char xb[32], nb[32], rb[32]; - memset(rb, 255, sizeof(rb)); - CHECK(sodium_init() >= 0); - if (!x->export_bytes(xb, 32, false) || !export_bytes_little(n, nb) || crypto_scalarmult_ristretto255(rb, nb, xb)) { - if (std::all_of(rb, rb + 32, [](unsigned char c) { return c == 255; })) { - if (quiet) { - stack.push_bool(false); - return 0; - } - throw VmError{Excno::range_chk, "invalid x or n"}; + if (n->sgn() == 0) { + stack.push_smallint(0); + if (quiet) { + stack.push_bool(true); } + return 0; + } + unsigned char xb[32], nb[32], rb[32]; + if (!x->export_bytes(xb, 32, false) || !export_bytes_little(n, nb) || crypto_scalarmult_ristretto255(rb, nb, xb)) { + if (quiet) { + stack.push_bool(false); + return 0; + } + throw VmError{Excno::range_chk, "invalid x or n"}; } td::RefInt256 r{true}; CHECK(r.write().import_bytes(rb, 32, false)); @@ -747,7 +746,6 @@ int exec_ristretto255_mul_base(VmState* st, bool quiet) { st->consume_gas(VmState::rist255_mulbase_gas_price); unsigned char nb[32], rb[32]; memset(rb, 255, sizeof(rb)); - CHECK(sodium_init() >= 0); if (!export_bytes_little(n, nb) || crypto_scalarmult_ristretto255_base(rb, nb)) { if (std::all_of(rb, rb + 32, [](unsigned char c) { return c == 255; })) { if (quiet) { @@ -833,7 +831,7 @@ int exec_bls_verify(VmState* st) { VM_LOG(st) << "execute BLS_VERIFY"; Stack& stack = st->get_stack(); stack.check_underflow(3); - st->consume_gas(st->bls_verify_gas_price); + st->consume_gas(VmState::bls_verify_gas_price); bls::P2 sig = slice_to_bls_p2(*stack.pop_cellslice()); td::BufferSlice msg = slice_to_bls_msg(*stack.pop_cellslice()); bls::P1 pub = slice_to_bls_p1(*stack.pop_cellslice()); @@ -845,8 +843,7 @@ int exec_bls_aggregate(VmState* st) { VM_LOG(st) << "execute BLS_AGGREGATE"; Stack& stack = st->get_stack(); int n = stack.pop_smallint_range(stack.depth() - 1, 1); - st->consume_gas( - std::max(0LL, VmState::bls_aggregate_base_gas_price + (long long)n * VmState::bls_aggregate_element_gas_price)); + st->consume_gas(VmState::bls_aggregate_base_gas_price + (long long)n * VmState::bls_aggregate_element_gas_price); std::vector sigs(n); for (int i = n - 1; i >= 0; --i) { sigs[i] = slice_to_bls_p2(*stack.pop_cellslice()); diff --git a/crypto/vm/vm.cpp b/crypto/vm/vm.cpp index 4d3e67af..3f595a00 100644 --- a/crypto/vm/vm.cpp +++ b/crypto/vm/vm.cpp @@ -21,6 +21,8 @@ #include "vm/dict.h" #include "vm/log.h" #include "vm/vm.h" +#include "cp0.h" +#include namespace vm { @@ -770,4 
+772,15 @@ void VmState::restore_parent_vm(int res) { } } +td::Status init_vm(bool enable_debug) { + if (!init_op_cp0(enable_debug)) { + return td::Status::Error("Failed to init TVM: failed to init cp0"); + } + auto code = sodium_init(); + if (code < 0) { + return td::Status::Error(PSTRING() << "Failed to init TVM: sodium_init, code=" << code); + } + return td::Status::OK(); +} + } // namespace vm diff --git a/crypto/vm/vm.h b/crypto/vm/vm.h index e3fda318..2066db4c 100644 --- a/crypto/vm/vm.h +++ b/crypto/vm/vm.h @@ -423,4 +423,6 @@ int run_vm_code(Ref _code, Stack& _stack, int flags = 0, Ref* d Ref lookup_library_in(td::ConstBitPtr key, Ref lib_root); +td::Status init_vm(bool enable_debug = false); + } // namespace vm diff --git a/dht-server/CMakeLists.txt b/dht-server/CMakeLists.txt index 889b3f30..6daac033 100644 --- a/dht-server/CMakeLists.txt +++ b/dht-server/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/dht/CMakeLists.txt b/dht/CMakeLists.txt index e50a7497..95ee7069 100644 --- a/dht/CMakeLists.txt +++ b/dht/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/emulator/CMakeLists.txt b/emulator/CMakeLists.txt index c23f071c..969f9a88 100644 --- a/emulator/CMakeLists.txt +++ b/emulator/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(BUILD_SHARED_LIBS "Use \"OFF\" for a static build." ON) diff --git a/emulator/emulator-extern.cpp b/emulator/emulator-extern.cpp index 3a439831..4b65dd06 100644 --- a/emulator/emulator-extern.cpp +++ b/emulator/emulator-extern.cpp @@ -139,7 +139,7 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, now = (unsigned)std::time(nullptr); } bool is_special = wc == ton::masterchainId && emulator->get_config().is_special_smartcontract(addr); - if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), td::Ref(), now, is_special)) { + if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), now, is_special)) { ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); } @@ -217,7 +217,7 @@ const char *transaction_emulator_emulate_tick_tock_transaction(void *transaction now = (unsigned)std::time(nullptr); } bool is_special = wc == ton::masterchainId && emulator->get_config().is_special_smartcontract(addr); - if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), td::Ref(), now, is_special)) { + if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), now, is_special)) { ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); } diff --git a/emulator/transaction-emulator.cpp b/emulator/transaction-emulator.cpp index 505c0c37..81cf2e9f 100644 --- a/emulator/transaction-emulator.cpp +++ b/emulator/transaction-emulator.cpp @@ -1,7 +1,7 @@ #include #include "transaction-emulator.h" #include "crypto/common/refcnt.hpp" -#include "vm/cp0.h" +#include "vm/vm.h" #include "tdutils/td/utils/Time.h" using td::Ref; @@ -34,7 +34,7 @@ td::Result> TransactionEmu return fetch_res.move_as_error_prefix("cannot fetch config params "); } - vm::init_op_cp0(debug_enabled_); + TRY_STATUS(vm::init_vm(debug_enabled_)); if (!lt) { lt = lt_; diff --git a/fec/CMakeLists.txt b/fec/CMakeLists.txt 
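Note on the TVM initialization change above: this patch introduces vm::init_vm(), which wraps the existing init_op_cp0() call and additionally runs sodium_init() once, so the per-opcode CHECK(sodium_init() >= 0) calls removed from the Ristretto255 handlers are no longer needed. A minimal sketch of the resulting call-site pattern, assuming only the declaration shown in crypto/vm/vm.h (td::Status init_vm(bool enable_debug = false)); the main() wrapper and include path are illustrative, not taken from the sources:

#include "vm/vm.h"

int main() {
  // Initialize TVM once: registers the cp0 opcode table and runs sodium_init().
  // ensure() aborts on failure, matching the updated call sites in this patch
  // (lite-client, SmartContract.cpp, transaction-emulator.cpp).
  vm::init_vm(/* enable_debug = */ false).ensure();
  // ... construct a Stack/VmState and run code as before ...
  return 0;
}
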
index b1ac37b1..2a305607 100644 --- a/fec/CMakeLists.txt +++ b/fec/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/fec/fec.cpp b/fec/fec.cpp index 102df038..f6379fd7 100644 --- a/fec/fec.cpp +++ b/fec/fec.cpp @@ -99,7 +99,7 @@ td::uint32 FecType::symbol_size() const { } td::Result FecType::create(tl_object_ptr obj) { - td::int32 data_size_int, symbol_size_int, symbols_count_int; + td::int32 data_size_int = 0, symbol_size_int = 0, symbols_count_int = 0; ton_api::downcast_call(*obj, td::overloaded([&](const auto &obj) { data_size_int = obj.data_size_; symbol_size_int = obj.symbol_size_; diff --git a/flake.lock b/flake.lock index ca44d4c0..d22f15d3 100644 --- a/flake.lock +++ b/flake.lock @@ -36,8 +36,8 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1682600000, - "narHash": "sha256-ha4BehR1dh8EnXSoE1m/wyyYVvHI9txjW4w5/oxsW5Y=", + "lastModified": 1698846319, + "narHash": "sha256-4jyW/dqFBVpWFnhl0nvP6EN4lP7/ZqPxYRjl6var0Oc=", "owner": "nixos", "repo": "nixpkgs", "rev": "50fc86b75d2744e1ab3837ef74b53f103a9b55a0", @@ -45,7 +45,7 @@ }, "original": { "owner": "nixos", - "ref": "nixos-22.05", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index cb70b5bb..4e993ac5 100644 --- a/flake.nix +++ b/flake.nix @@ -1,6 +1,6 @@ { inputs = { - nixpkgs-stable.url = "github:nixos/nixpkgs/nixos-22.05"; + nixpkgs-stable.url = "github:nixos/nixpkgs/nixos-23.05"; nixpkgs-trunk.url = "github:nixos/nixpkgs"; flake-compat = { url = "github:edolstra/flake-compat"; @@ -28,14 +28,14 @@ # then we can skip these manual overrides # and switch between pkgsStatic and pkgsStatic.pkgsMusl for static glibc and musl builds if !staticExternalDeps then [ - openssl_1_1 + openssl zlib libmicrohttpd libsodium secp256k1 ] else [ - (openssl_1_1.override { static = true; }).dev + (openssl.override { static = true; }).dev (zlib.override { shared = false; }).dev ] ++ optionals (!stdenv.isDarwin) [ pkgsStatic.libmicrohttpd.dev pkgsStatic.libsodium.dev secp256k1 ] @@ -50,6 +50,8 @@ ] ++ optionals (staticGlibc || staticMusl) [ "-DCMAKE_LINK_SEARCH_START_STATIC=ON" "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + ] ++ optionals (stdenv.isDarwin) [ + "-DCMAKE_CXX_FLAGS=-stdlib=libc++" "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7" ]; LDFLAGS = optional staticExternalDeps (concatStringsSep " " [ diff --git a/http/CMakeLists.txt b/http/CMakeLists.txt index dbc57ec2..4a3fccf8 100644 --- a/http/CMakeLists.txt +++ b/http/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(HTTP_SOURCE http.h diff --git a/keyring/CMakeLists.txt b/keyring/CMakeLists.txt index 29e48ee9..f8f610f2 100644 --- a/keyring/CMakeLists.txt +++ b/keyring/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(KEYRING_SOURCE keyring.h diff --git a/keys/CMakeLists.txt b/keys/CMakeLists.txt index 486119de..e80436b7 100644 --- a/keys/CMakeLists.txt +++ b/keys/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(KEYS_SOURCE keys.cpp diff --git a/lite-client/CMakeLists.txt b/lite-client/CMakeLists.txt index b8449516..53e09d77 100644 --- a/lite-client/CMakeLists.txt +++ b/lite-client/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 
FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_library(lite-client-common lite-client-common.cpp lite-client-common.h) target_link_libraries(lite-client-common PUBLIC tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_crypto ton_block) diff --git a/lite-client/lite-client.cpp b/lite-client/lite-client.cpp index feedbe40..dd6df40f 100644 --- a/lite-client/lite-client.cpp +++ b/lite-client/lite-client.cpp @@ -4307,7 +4307,7 @@ int main(int argc, char* argv[]) { }); #endif - vm::init_op_cp0(true); // enable vm debug + vm::init_vm(true).ensure(); // enable vm debug td::actor::Scheduler scheduler({2}); diff --git a/memprof/CMakeLists.txt b/memprof/CMakeLists.txt index 8559c4d9..2ccf11df 100644 --- a/memprof/CMakeLists.txt +++ b/memprof/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(MEMPROF_SOURCE memprof/memprof.cpp diff --git a/overlay/CMakeLists.txt b/overlay/CMakeLists.txt index 7adc0584..ab9722a6 100644 --- a/overlay/CMakeLists.txt +++ b/overlay/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/rldp-http-proxy/CMakeLists.txt b/rldp-http-proxy/CMakeLists.txt index 92cb01ac..f7e30c80 100644 --- a/rldp-http-proxy/CMakeLists.txt +++ b/rldp-http-proxy/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h DNSResolver.cpp) target_include_directories(rldp-http-proxy PUBLIC $) diff --git a/rldp/CMakeLists.txt b/rldp/CMakeLists.txt index 813d346d..39e0d3ca 100644 --- a/rldp/CMakeLists.txt +++ b/rldp/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/rldp2/CMakeLists.txt b/rldp2/CMakeLists.txt index 1bfeb0bb..c144ec01 100644 --- a/rldp2/CMakeLists.txt +++ b/rldp2/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/storage/CMakeLists.txt b/storage/CMakeLists.txt index a5f36ff2..0208a33d 100644 --- a/storage/CMakeLists.txt +++ b/storage/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/storage/storage-daemon/CMakeLists.txt b/storage/storage-daemon/CMakeLists.txt index 4880eece..c987858f 100644 --- a/storage/storage-daemon/CMakeLists.txt +++ b/storage/storage-daemon/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable(embed-provider-code smartcont/embed-provider-code.cpp) diff --git a/storage/test/storage.cpp b/storage/test/storage.cpp index e7a97352..ff5a4831 100644 --- a/storage/test/storage.cpp +++ b/storage/test/storage.cpp @@ -400,7 +400,6 @@ class NetChannel : public td::actor::Actor { break; } else if (l > alive_end - eps) { alive_begin += alive_step + sleep_step; - alive_end = alive_begin + alive_step; } else { double new_l = td::min(alive_end, r); res += (new_l - l) * speed; @@ -516,9 +515,7 @@ class NetChannel : public td::actor::Actor { queue_ = {}; } - bool ok = false; 
while (!queue_.empty() && (double)queue_.front().size < got_) { - ok = true; auto query = queue_.pop(); got_ -= (double)query.size; total_size_ -= (double)query.size; diff --git a/tdactor/CMakeLists.txt b/tdactor/CMakeLists.txt index 3490eb17..46dd0335 100644 --- a/tdactor/CMakeLists.txt +++ b/tdactor/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #SOURCE SETS set(TDACTOR_SOURCE diff --git a/tdactor/benchmark/CMakeLists.txt b/tdactor/benchmark/CMakeLists.txt index e01d33dc..c4ff79a1 100644 --- a/tdactor/benchmark/CMakeLists.txt +++ b/tdactor/benchmark/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(BENCHMARK_SOURCE benchmark.cpp diff --git a/tdactor/test/actors_core.cpp b/tdactor/test/actors_core.cpp index ae10eb9b..96cd6239 100644 --- a/tdactor/test/actors_core.cpp +++ b/tdactor/test/actors_core.cpp @@ -720,7 +720,7 @@ TEST(Actor2, actor_function_result) { } TEST(Actor2, actor_ping_pong) { - Scheduler scheduler{{3}, Scheduler::Paused}; + Scheduler scheduler{{3}, false, Scheduler::Paused}; sb.clear(); scheduler.start(); @@ -799,7 +799,7 @@ TEST(Actor2, Schedulers) { for (auto run_count : {0, 1, 2}) { for (auto stop_count : {0, 1, 2}) { for (size_t threads : {0, 1}) { - Scheduler scheduler({threads}, mode); + Scheduler scheduler({threads}, false, mode); for (int i = 0; i < start_count; i++) { scheduler.start(); } diff --git a/tdactor/test/actors_promise.cpp b/tdactor/test/actors_promise.cpp index f1d57069..5717b394 100644 --- a/tdactor/test/actors_promise.cpp +++ b/tdactor/test/actors_promise.cpp @@ -210,7 +210,7 @@ TEST(Actor, promise_future) { TEST(Actor2, actor_lost_promise) { using namespace td::actor; using namespace td; - Scheduler scheduler({1}, Scheduler::Paused); + Scheduler scheduler({1}, false, Scheduler::Paused); auto watcher = td::create_shared_destructor([] { LOG(ERROR) << "STOP"; diff --git a/tddb/CMakeLists.txt b/tddb/CMakeLists.txt index 1acd5420..89730b95 100644 --- a/tddb/CMakeLists.txt +++ b/tddb/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #SOURCE SETS set(TDDB_UTILS_SOURCE diff --git a/tddb/td/db/utils/BlobView.cpp b/tddb/td/db/utils/BlobView.cpp index 7011a00e..ebfbc6d5 100644 --- a/tddb/td/db/utils/BlobView.cpp +++ b/tddb/td/db/utils/BlobView.cpp @@ -311,6 +311,7 @@ td::Result FileMemoryMappingBlobView::create(td::CSlice file_path, td: class CyclicBlobViewImpl : public BlobViewImpl { public: CyclicBlobViewImpl(td::BufferSlice data, td::uint64 total_size) : data_(std::move(data)), total_size_(total_size) { + CHECK(!data_.empty()); } td::Result view_impl(td::MutableSlice slice, td::uint64 offset) override { auto res = slice; diff --git a/tdfec/CMakeLists.txt b/tdfec/CMakeLists.txt index adfe2fdb..828ff90d 100644 --- a/tdfec/CMakeLists.txt +++ b/tdfec/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(TDFEC_SOURCE td/fec/raptorq/Rfc.cpp diff --git a/tdfec/benchmark/CMakeLists.txt b/tdfec/benchmark/CMakeLists.txt index 93ec575d..ee8f72cb 100644 --- a/tdfec/benchmark/CMakeLists.txt +++ b/tdfec/benchmark/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable(benchmark-fec benchmark.cpp ) target_include_directories(benchmark-fec PUBLIC $) diff --git 
a/tdfec/td/fec/raptorq/Rfc.h b/tdfec/td/fec/raptorq/Rfc.h index 1f5c27f0..e3a33131 100644 --- a/tdfec/td/fec/raptorq/Rfc.h +++ b/tdfec/td/fec/raptorq/Rfc.h @@ -61,7 +61,7 @@ class Rfc { template void encoding_row_for_each(EncodingRow t, F &&f) const { f(t.b); - for (uint16 j = 1; j < t.d; ++j) { + for (uint32 j = 1; j < t.d; ++j) { t.b = (t.b + t.a) % W; f(t.b); } diff --git a/tdnet/CMakeLists.txt b/tdnet/CMakeLists.txt index d5ae7086..bc00a676 100644 --- a/tdnet/CMakeLists.txt +++ b/tdnet/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(TDNET_SOURCE td/net/FdListener.cpp diff --git a/tdtl/CMakeLists.txt b/tdtl/CMakeLists.txt index b0f83cd9..482bd0f7 100644 --- a/tdtl/CMakeLists.txt +++ b/tdtl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #SOURCE SETS set(TDTL_SOURCE diff --git a/tdutils/CMakeLists.txt b/tdutils/CMakeLists.txt index 7b577e4f..f1e4b1ea 100644 --- a/tdutils/CMakeLists.txt +++ b/tdutils/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(TDUTILS_MIME_TYPE "Generate mime types conversion (gperf is required)" ON) @@ -314,7 +314,7 @@ if (WIN32) # find_library(WS2_32_LIBRARY ws2_32) # find_library(MSWSOCK_LIBRARY Mswsock) # target_link_libraries(tdutils PRIVATE ${WS2_32_LIBRARY} ${MSWSOCK_LIBRARY}) - target_link_libraries(tdutils PRIVATE ws2_32 Mswsock Normaliz psapi) + target_link_libraries(tdutils PRIVATE ws2_32 Mswsock Normaliz psapi DbgHelp) endif() if (NOT CMAKE_CROSSCOMPILING AND TDUTILS_MIME_TYPE) add_dependencies(tdutils tdmime_auto) diff --git a/tdutils/generate/CMakeLists.txt b/tdutils/generate/CMakeLists.txt index 07353e51..194fda39 100644 --- a/tdutils/generate/CMakeLists.txt +++ b/tdutils/generate/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) # Generates files for MIME type <-> extension conversions # DEPENDS ON: gperf grep bash/powershell diff --git a/tdutils/td/utils/BigNum.cpp b/tdutils/td/utils/BigNum.cpp index 36dde064..9de11fca 100644 --- a/tdutils/td/utils/BigNum.cpp +++ b/tdutils/td/utils/BigNum.cpp @@ -159,7 +159,11 @@ bool BigNum::is_bit_set(int num) const { } bool BigNum::is_prime(BigNumContext &context) const { +#if OPENSSL_VERSION_MAJOR >= 3 + int result = BN_check_prime(impl_->big_num, context.impl_->big_num_context, nullptr); +#else int result = BN_is_prime_ex(impl_->big_num, BN_prime_checks, context.impl_->big_num_context, nullptr); +#endif LOG_IF(FATAL, result == -1); return result == 1; } diff --git a/tdutils/td/utils/BufferedUdp.h b/tdutils/td/utils/BufferedUdp.h index bf4aa1b8..3fa93e9d 100644 --- a/tdutils/td/utils/BufferedUdp.h +++ b/tdutils/td/utils/BufferedUdp.h @@ -106,6 +106,7 @@ class UdpReader { } if (status.is_error() && !UdpSocketFd::is_critical_read_error(status)) { queue.push(UdpMessage{{}, {}, std::move(status)}); + return td::Status::OK(); } return status; } diff --git a/tdutils/td/utils/crypto.cpp b/tdutils/td/utils/crypto.cpp index 27313cf3..ea1efbe7 100644 --- a/tdutils/td/utils/crypto.cpp +++ b/tdutils/td/utils/crypto.cpp @@ -25,7 +25,6 @@ #include "td/utils/logging.h" #include "td/utils/misc.h" #include "td/utils/port/RwMutex.h" -#include "td/utils/port/thread_local.h" #include "td/utils/Random.h" #include "td/utils/ScopeGuard.h" #include "td/utils/SharedSlice.h" @@ -598,16 +597,23 @@ void 
aes_ige_decrypt(Slice aes_key, MutableSlice aes_iv, Slice from, MutableSlic static void aes_cbc_xcrypt(Slice aes_key, MutableSlice aes_iv, Slice from, MutableSlice to, bool encrypt_flag) { CHECK(aes_key.size() == 32); CHECK(aes_iv.size() == 16); - AES_KEY key; - int err; - if (encrypt_flag) { - err = AES_set_encrypt_key(aes_key.ubegin(), 256, &key); - } else { - err = AES_set_decrypt_key(aes_key.ubegin(), 256, &key); - } - LOG_IF(FATAL, err != 0); CHECK(from.size() <= to.size()); - AES_cbc_encrypt(from.ubegin(), to.ubegin(), from.size(), &key, aes_iv.ubegin(), encrypt_flag); + CHECK(from.size() % 16 == 0); + int out_len = 0; + EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new(); + CHECK(ctx); + if (encrypt_flag) { + CHECK(EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), nullptr, aes_key.ubegin(), aes_iv.ubegin()) == 1); + CHECK(EVP_CIPHER_CTX_set_padding(ctx, 0) == 1); + CHECK(EVP_EncryptUpdate(ctx, to.ubegin(), &out_len, from.ubegin(), td::narrow_cast(from.size())) == 1); + CHECK(EVP_EncryptFinal_ex(ctx, to.ubegin() + out_len, &out_len) == 1); + } else { + CHECK(EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), nullptr, aes_key.ubegin(), aes_iv.ubegin()) == 1); + CHECK(EVP_CIPHER_CTX_set_padding(ctx, 0) == 1); + CHECK(EVP_DecryptUpdate(ctx, to.ubegin(), &out_len, from.ubegin(), td::narrow_cast(from.size())) == 1); + CHECK(EVP_DecryptFinal_ex(ctx, to.ubegin() + out_len, &out_len) == 1); + } + EVP_CIPHER_CTX_free(ctx); } void aes_cbc_encrypt(Slice aes_key, MutableSlice aes_iv, Slice from, MutableSlice to) { @@ -723,7 +729,18 @@ string sha512(Slice data) { class Sha256State::Impl { public: - SHA256_CTX ctx_; + EVP_MD_CTX *ctx_ = nullptr; + + Impl() { + ctx_ = EVP_MD_CTX_new(); + CHECK(ctx_); + } + + ~Impl() { + if (ctx_) { + EVP_MD_CTX_free(ctx_); + } + } }; Sha256State::Sha256State() = default; @@ -755,24 +772,23 @@ void Sha256State::init() { impl_ = make_unique(); } CHECK(!is_inited_); - int err = SHA256_Init(&impl_->ctx_); - LOG_IF(FATAL, err != 1); + CHECK(EVP_DigestInit_ex(impl_->ctx_, EVP_sha256(), nullptr) == 1); is_inited_ = true; } void Sha256State::feed(Slice data) { CHECK(impl_); CHECK(is_inited_); - int err = SHA256_Update(&impl_->ctx_, data.ubegin(), data.size()); - LOG_IF(FATAL, err != 1); + CHECK(EVP_DigestUpdate(impl_->ctx_, data.ubegin(), data.size()) == 1); } void Sha256State::extract(MutableSlice output, bool destroy) { CHECK(output.size() >= 32); CHECK(impl_); CHECK(is_inited_); - int err = SHA256_Final(output.ubegin(), &impl_->ctx_); - LOG_IF(FATAL, err != 1); + unsigned size; + CHECK(EVP_DigestFinal_ex(impl_->ctx_, output.ubegin(), &size) == 1); + CHECK(size == 32); is_inited_ = false; if (destroy) { impl_.reset(); diff --git a/tdutils/td/utils/crypto.h b/tdutils/td/utils/crypto.h index 592a8a00..4494ef48 100644 --- a/tdutils/td/utils/crypto.h +++ b/tdutils/td/utils/crypto.h @@ -151,7 +151,7 @@ class Sha256State { bool is_inited_ = false; }; -void md5(Slice input, MutableSlice output); +[[deprecated("MD5 is not cryptographically secure")]] void md5(Slice input, MutableSlice output); void pbkdf2_sha256(Slice password, Slice salt, int iteration_count, MutableSlice dest); void pbkdf2_sha512(Slice password, Slice salt, int iteration_count, MutableSlice dest); diff --git a/tdutils/td/utils/port/stacktrace.cpp b/tdutils/td/utils/port/stacktrace.cpp index 2c025d2e..e89daec6 100644 --- a/tdutils/td/utils/port/stacktrace.cpp +++ b/tdutils/td/utils/port/stacktrace.cpp @@ -20,9 +20,13 @@ #include "td/utils/port/signals.h" -#if __GLIBC__ +#if TD_WINDOWS +#include +#else +#if TD_DARWIN || __GLIBC__ 
#include #endif +#endif #if TD_LINUX || TD_FREEBSD #include @@ -39,13 +43,48 @@ namespace td { namespace { void print_backtrace(void) { -#if __GLIBC__ +#if TD_WINDOWS + void *stack[100]; + HANDLE process = GetCurrentProcess(); + SymInitialize(process, nullptr, 1); + unsigned frames = CaptureStackBackTrace(0, 100, stack, nullptr); + signal_safe_write("------- Stack Backtrace -------\n", false); + for (unsigned i = 0; i < frames; i++) { + td::uint8 symbol_buf[sizeof(SYMBOL_INFO) + 256]; + auto symbol = (SYMBOL_INFO *)symbol_buf; + memset(symbol_buf, 0, sizeof(symbol_buf)); + symbol->MaxNameLen = 255; + symbol->SizeOfStruct = sizeof(SYMBOL_INFO); + SymFromAddr(process, (DWORD64)(stack[i]), nullptr, symbol); + // Don't use sprintf here because it is not signal-safe + char buf[256 + 32]; + char* buf_ptr = buf; + if (frames - i - 1 < 10) { + strcpy(buf_ptr, " "); + buf_ptr += strlen(buf_ptr); + } + _itoa(frames - i - 1, buf_ptr, 10); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, ": ["); + buf_ptr += strlen(buf_ptr); + _ui64toa(td::uint64(symbol->Address), buf_ptr, 16); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, "] "); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, symbol->Name); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, "\n"); + signal_safe_write(td::Slice{buf, strlen(buf)}, false); + } +#else +#if TD_DARWIN || __GLIBC__ void *buffer[128]; int nptrs = backtrace(buffer, 128); signal_safe_write("------- Stack Backtrace -------\n", false); backtrace_symbols_fd(buffer, nptrs, 2); signal_safe_write("-------------------------------\n", false); #endif +#endif } void print_backtrace_gdb(void) { @@ -129,7 +168,7 @@ void Stacktrace::print_to_stderr(const PrintOptions &options) { } void Stacktrace::init() { -#if __GLIBC__ +#if TD_DARWIN || __GLIBC__ // backtrace needs to be called once to ensure that next calls are async-signal-safe void *buffer[1]; backtrace(buffer, 1); diff --git a/tdutils/test/MpmcWaiter.cpp b/tdutils/test/MpmcWaiter.cpp index 9cb5b363..d0a9fc84 100644 --- a/tdutils/test/MpmcWaiter.cpp +++ b/tdutils/test/MpmcWaiter.cpp @@ -75,9 +75,9 @@ void test_waiter_stress_one_one() { TEST(MpmcEagerWaiter, stress_one_one) { test_waiter_stress_one_one(); } -TEST(MpmcSleepyWaiter, stress_one_one) { - test_waiter_stress_one_one(); -} +// TEST(MpmcSleepyWaiter, stress_one_one) { +// test_waiter_stress_one_one(); +// } template void test_waiter_stress() { diff --git a/terminal/CMakeLists.txt b/terminal/CMakeLists.txt index ae8c70bd..af51153f 100644 --- a/terminal/CMakeLists.txt +++ b/terminal/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/test/ed25519_crypto.cpp b/test/ed25519_crypto.cpp deleted file mode 100644 index 371b7247..00000000 --- a/test/ed25519_crypto.cpp +++ /dev/null @@ -1,2053 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. 
If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include -#include -#include -#include -#include - -// ****************************************************** - -namespace openssl { -#include -} - -namespace arith { -struct dec_string { - std::string str; - explicit dec_string(const std::string& s) : str(s) { - } -}; - -struct hex_string { - std::string str; - explicit hex_string(const std::string& s) : str(s) { - } -}; -} // namespace arith - -namespace arith { - -using namespace openssl; - -inline void bn_assert(int cond); -BN_CTX* get_ctx(); - -class BignumBitref { - BIGNUM* ptr; - int n; - - public: - BignumBitref(BIGNUM& x, int _n) : ptr(&x), n(_n){}; - operator bool() const { - return BN_is_bit_set(ptr, n); - } - BignumBitref& operator=(bool val); -}; - -class Bignum { - BIGNUM val; - - public: - class bignum_error {}; - Bignum() { - BN_init(&val); - } - Bignum(long x) { - BN_init(&val); - set_long(x); - } - ~Bignum() { - BN_free(&val); - } - Bignum(const dec_string& ds) { - BN_init(&val); - set_dec_str(ds.str); - } - Bignum(const hex_string& hs) { - BN_init(&val); - set_hex_str(hs.str); - } - Bignum(const Bignum& x) { - BN_init(&val); - BN_copy(&val, &x.val); - } - //Bignum (Bignum&& x) { val = x.val; } - void clear() { - BN_clear(&val); - } // use this for sensitive data - Bignum& operator=(const Bignum& x) { - BN_copy(&val, &x.val); - return *this; - } - Bignum& operator=(Bignum&& x) { - swap(x); - return *this; - } - Bignum& operator=(long x) { - return set_long(x); - } - Bignum& operator=(const dec_string& ds) { - return set_dec_str(ds.str); - } - Bignum& operator=(const hex_string& hs) { - return set_hex_str(hs.str); - } - Bignum& swap(Bignum& x) { - BN_swap(&val, &x.val); - return *this; - } - BIGNUM* bn_ptr() { - return &val; - } - const BIGNUM* bn_ptr() const { - return &val; - } - bool is_zero() const { - return BN_is_zero(&val); - } - int sign() const { - return BN_is_zero(&val) ? 0 : (BN_is_negative(&val) ? 
-1 : 1); - } - bool odd() const { - return BN_is_odd(&val); - } - int num_bits() const { - return BN_num_bits(&val); - } - int num_bytes() const { - return BN_num_bytes(&val); - } - bool operator[](int n) const { - return BN_is_bit_set(&val, n); - } - BignumBitref operator[](int n) { - return BignumBitref(val, n); - } - void export_msb(unsigned char* buffer, std::size_t size) const; - Bignum& import_msb(const unsigned char* buffer, std::size_t size); - Bignum& import_msb(const std::string& s) { - return import_msb((const unsigned char*)s.c_str(), s.size()); - } - void export_lsb(unsigned char* buffer, std::size_t size) const; - Bignum& import_lsb(const unsigned char* buffer, std::size_t size); - Bignum& import_lsb(const std::string& s) { - return import_lsb((const unsigned char*)s.c_str(), s.size()); - } - - Bignum& set_dec_str(std::string s) { - BIGNUM* tmp = &val; - bn_assert(BN_dec2bn(&tmp, s.c_str())); - return *this; - } - - Bignum& set_hex_str(std::string s) { - BIGNUM* tmp = &val; - bn_assert(BN_hex2bn(&tmp, s.c_str())); - return *this; - } - - Bignum& set_ulong(unsigned long x) { - bn_assert(BN_set_word(&val, x)); - return *this; - } - - Bignum& set_long(long x) { - set_ulong(std::abs(x)); - return x < 0 ? negate() : *this; - } - - Bignum& negate() { - BN_set_negative(&val, !BN_is_negative(&val)); - return *this; - } - - Bignum& operator+=(const Bignum& y) { - bn_assert(BN_add(&val, &val, &y.val)); - return *this; - } - - Bignum& operator+=(long y) { - bn_assert((y >= 0 ? BN_add_word : BN_sub_word)(&val, std::abs(y))); - return *this; - } - - Bignum& operator-=(long y) { - bn_assert((y >= 0 ? BN_sub_word : BN_add_word)(&val, std::abs(y))); - return *this; - } - - Bignum& operator*=(const Bignum& y) { - bn_assert(BN_mul(&val, &val, &y.val, get_ctx())); - return *this; - } - - Bignum& operator*=(long y) { - if (y < 0) { - negate(); - } - bn_assert(BN_mul_word(&val, std::abs(y))); - return *this; - } - - Bignum& operator<<=(int r) { - bn_assert(BN_lshift(&val, &val, r)); - return *this; - } - - Bignum& operator>>=(int r) { - bn_assert(BN_rshift(&val, &val, r)); - return *this; - } - - Bignum& operator/=(const Bignum& y) { - Bignum w; - bn_assert(BN_div(&val, &w.val, &val, &y.val, get_ctx())); - return *this; - } - - Bignum& operator/=(long y) { - bn_assert(BN_div_word(&val, std::abs(y)) != (BN_ULONG)(-1)); - return y < 0 ? negate() : *this; - } - - Bignum& operator%=(const Bignum& y) { - bn_assert(BN_mod(&val, &val, &y.val, get_ctx())); - return *this; - } - - Bignum& operator%=(long y) { - BN_ULONG rem = BN_mod_word(&val, std::abs(y)); - bn_assert(rem != (BN_ULONG)(-1)); - return set_long(y < 0 ? 
-rem : rem); - } - - unsigned long divmod(unsigned long y) { - BN_ULONG rem = BN_div_word(&val, y); - bn_assert(rem != (BN_ULONG)(-1)); - return rem; - } - - const Bignum divmod(const Bignum& y); - - std::string to_str() const; - std::string to_hex() const; -}; - -inline void bn_assert(int cond) { - if (!cond) { - throw Bignum::bignum_error(); - } -} - -BN_CTX* get_ctx(void) { - static BN_CTX* ctx = BN_CTX_new(); - return ctx; -} - -BignumBitref& BignumBitref::operator=(bool val) { - if (val) { - BN_set_bit(ptr, n); - } else { - BN_clear_bit(ptr, n); - } - return *this; -} - -const Bignum operator+(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_add(z.bn_ptr(), x.bn_ptr(), y.bn_ptr())); - return z; -} - -const Bignum operator+(const Bignum& x, long y) { - if (y > 0) { - Bignum z(x); - bn_assert(BN_add_word(z.bn_ptr(), y)); - return z; - } else if (y < 0) { - Bignum z(x); - bn_assert(BN_sub_word(z.bn_ptr(), -y)); - return z; - } else { - return x; - } -} - -/* - const Bignum operator+ (Bignum&& x, long y) { - if (y > 0) { - bn_assert (BN_add_word (x.bn_ptr(), y)); - } else if (y < 0) { - bn_assert (BN_sub_word (x.bn_ptr(), -y)); - } - return std::move (x); - } - */ - -const Bignum operator+(long y, const Bignum& x) { - return x + y; -} - -/* - const Bignum operator+ (long y, Bignum&& x) { - return x + y; - } - */ - -const Bignum operator-(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_sub(z.bn_ptr(), x.bn_ptr(), y.bn_ptr())); - return z; -} - -const Bignum operator-(const Bignum& x, long y) { - return x + (-y); -} - -/* - const Bignum operator- (Bignum&& x, long y) { - return x + (-y); - } - */ - -const Bignum operator*(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_mul(z.bn_ptr(), x.bn_ptr(), y.bn_ptr(), get_ctx())); - return z; -} - -const Bignum operator*(const Bignum& x, long y) { - if (y > 0) { - Bignum z(x); - bn_assert(BN_mul_word(z.bn_ptr(), y)); - return z; - } else if (y < 0) { - Bignum z(x); - z.negate(); - bn_assert(BN_mul_word(z.bn_ptr(), -y)); - return z; - } else { - Bignum z(0); - return z; - } -} - -/* - const Bignum operator* (Bignum&& x, long y) { - if (y > 0) { - bn_assert (BN_mul_word (x.bn_ptr(), y)); - } else if (y < 0) { - x.negate(); - bn_assert (BN_mul_word (x.bn_ptr(), -y)); - } else { - x = 0; - } - return std::move (x); - } - */ - -const Bignum operator*(long y, const Bignum& x) { - return x * y; -} - -const Bignum operator/(const Bignum& x, const Bignum& y) { - Bignum z, w; - bn_assert(BN_div(z.bn_ptr(), w.bn_ptr(), x.bn_ptr(), y.bn_ptr(), get_ctx())); - return z; -} - -const Bignum Bignum::divmod(const Bignum& y) { - Bignum w; - bn_assert(BN_div(&val, w.bn_ptr(), &val, y.bn_ptr(), get_ctx())); - return w; -} - -const Bignum operator%(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_mod(z.bn_ptr(), x.bn_ptr(), y.bn_ptr(), get_ctx())); - return z; -} - -unsigned long operator%(const Bignum& x, unsigned long y) { - BN_ULONG rem = BN_mod_word(x.bn_ptr(), y); - bn_assert(rem != (BN_ULONG)(-1)); - return rem; -} - -const Bignum operator<<(const Bignum& x, int r) { - Bignum z; - bn_assert(BN_lshift(z.bn_ptr(), x.bn_ptr(), r)); - return z; -} - -const Bignum operator>>(const Bignum& x, int r) { - Bignum z; - bn_assert(BN_rshift(z.bn_ptr(), x.bn_ptr(), r)); - return z; -} - -const Bignum abs(const Bignum& x) { - Bignum T(x); - if (T.sign() < 0) { - T.negate(); - } - return T; -} - -const Bignum sqr(const Bignum& x) { - Bignum z; - bn_assert(BN_sqr(z.bn_ptr(), x.bn_ptr(), get_ctx())); - return z; -} - -void 
Bignum::export_msb(unsigned char* buffer, std::size_t size) const { - bn_assert(size >= 0 && size <= (1 << 20)); - bn_assert(sign() >= 0); - int n = BN_num_bytes(&val); - bn_assert(n >= 0 && (unsigned)n <= size); - bn_assert(BN_bn2bin(&val, buffer + size - n) == n); - std::memset(buffer, 0, size - n); -} - -Bignum& Bignum::import_msb(const unsigned char* buffer, std::size_t size) { - bn_assert(size >= 0 && size <= (1 << 20)); - std::size_t i = 0; - while (i < size && !buffer[i]) { - i++; - } - bn_assert(BN_bin2bn(buffer + i, size - i, &val) == &val); - return *this; -} - -void Bignum::export_lsb(unsigned char* buffer, std::size_t size) const { - bn_assert(size >= 0 && size <= (1 << 20)); - bn_assert(sign() >= 0); - std::size_t n = BN_num_bytes(&val); - bn_assert(n >= 0 && (unsigned)n <= size); - bn_assert(BN_bn2bin(&val, buffer) == (int)n); - std::memset(buffer + n, 0, size - n); - for (std::size_t i = 0; 2 * i + 1 < n; i++) { - std::swap(buffer[i], buffer[n - 1 - i]); - } -} - -Bignum& Bignum::import_lsb(const unsigned char* buffer, std::size_t size) { - bn_assert(size >= 0 && size <= (1 << 20)); - while (size > 0 && !buffer[size - 1]) { - size--; - } - if (!size) { - bn_assert(BN_zero(&val)); - return *this; - } - unsigned char tmp[size], *ptr = tmp + size; - for (std::size_t i = 0; i < size; i++) { - *--ptr = buffer[i]; - } - bn_assert(BN_bin2bn(tmp, size, &val) == &val); - return *this; -} - -int cmp(const Bignum& x, const Bignum& y) { - return BN_cmp(x.bn_ptr(), y.bn_ptr()); -} - -bool operator==(const Bignum& x, const Bignum& y) { - return cmp(x, y) == 0; -} - -bool operator!=(const Bignum& x, const Bignum& y) { - return cmp(x, y) != 0; -} - -bool operator<(const Bignum& x, const Bignum& y) { - return cmp(x, y) < 0; -} - -bool operator<=(const Bignum& x, const Bignum& y) { - return cmp(x, y) <= 0; -} - -bool operator>(const Bignum& x, const Bignum& y) { - return cmp(x, y) > 0; -} - -bool operator>=(const Bignum& x, const Bignum& y) { - return cmp(x, y) >= 0; -} - -bool operator==(const Bignum& x, long y) { - if (y >= 0) { - return BN_is_word(x.bn_ptr(), y); - } else { - return x == Bignum(y); - } -} - -bool operator!=(const Bignum& x, long y) { - if (y >= 0) { - return !BN_is_word(x.bn_ptr(), y); - } else { - return x != Bignum(y); - } -} - -std::string Bignum::to_str() const { - char* ptr = BN_bn2dec(&val); - std::string z(ptr); - OPENSSL_free(ptr); - return z; -} - -std::string Bignum::to_hex() const { - char* ptr = BN_bn2hex(&val); - std::string z(ptr); - OPENSSL_free(ptr); - return z; -} - -std::ostream& operator<<(std::ostream& os, const Bignum& x) { - return os << x.to_str(); -} - -std::istream& operator>>(std::istream& is, Bignum& x) { - std::string word; - is >> word; - x = dec_string(word); - return is; -} - -bool is_prime(const Bignum& p, int nchecks = 64, bool trial_div = true) { - return BN_is_prime_fasttest_ex(p.bn_ptr(), BN_prime_checks, get_ctx(), trial_div, 0); -} -} // namespace arith - -namespace arith { -using namespace openssl; - -class Residue; -class ResidueRing; - -class ResidueRing { - public: - struct bad_modulus {}; - struct elem_cnt_mismatch { - int cnt; - elem_cnt_mismatch(int x) : cnt(x) { - } - }; - - private: - const Bignum modulus; - mutable int cnt; - bool prime; - void cnt_assert(bool b) { - if (!b) { - throw elem_cnt_mismatch(cnt); - } - } - Residue* Zero; - Residue* One; - Residue* Img_i; - void init(); - - public: - typedef Residue element; - explicit ResidueRing(Bignum mod) : modulus(mod), cnt(0), prime(arith::is_prime(mod)), Zero(0), One(0) { - 
init(); - } - ~ResidueRing(); - int incr_count() { - return ++cnt; - } - int decr_count() { - --cnt; - cnt_assert(cnt >= 0); - return cnt; - } - const Bignum& get_modulus() const { - return modulus; - } - bool is_prime() const { - return prime; - } - const Residue& zero() const { - return *Zero; - } - const Residue& one() const { - return *One; - } - const Residue& img_i(); - Residue frac(long num, long denom = 1); - Residue convert(long num); - Residue convert(const Bignum& x); - - Bignum reduce(const Bignum& x) { - Bignum r = x % modulus; - if (r.sign() < 0) { - r += modulus; - } - return r; - } - - Bignum& do_reduce(Bignum& x) { - x %= modulus; - if (x.sign() < 0) { - x += modulus; - } - return x; - } -}; - -class Residue { - public: - struct not_same_ring {}; - - private: - ResidueRing* ring; - mutable Bignum val; - Residue& reduce() { - ring->do_reduce(val); - return *this; - } - - public: - explicit Residue(ResidueRing& R) : ring(&R) { - R.incr_count(); - } - Residue(const Bignum& x, ResidueRing& R) : ring(&R), val(R.reduce(x)) { - R.incr_count(); - } - ~Residue() { - ring->decr_count(); - ring = 0; - } - Residue(const Residue& x) : ring(x.ring), val(x.val) { - ring->incr_count(); - } - Bignum extract() const { - return val; - } - const Bignum& extract_raw() const { - return val; - } - const Bignum& modulus() const { - return ring->get_modulus(); - } - void same_ring(const Residue& y) const { - if (ring != y.ring) { - throw not_same_ring(); - } - } - ResidueRing& ring_of() const { - return *ring; - } - bool is_zero() const { - return (val == 0); - } - Residue& operator=(const Residue& x) { - same_ring(x); - val = x.val; - return *this; - } - Residue& operator=(const Bignum& x) { - val = ring->reduce(x); - return *this; - } - Residue& operator+=(const Residue& y); - Residue& operator-=(const Residue& y); - Residue& operator*=(const Residue& y); - Residue& operator+=(long y) { - val += y; - return reduce(); - } - Residue& operator-=(long y) { - val -= y; - return reduce(); - } - Residue& operator*=(long y) { - val *= y; - return reduce(); - } - Residue& negate() { - val.negate(); - return reduce(); - } - friend const Residue operator+(const Residue& x, const Residue& y); - friend const Residue operator-(const Residue& x, const Residue& y); - friend const Residue operator*(const Residue& x, const Residue& y); - friend const Residue operator-(const Residue& x); - friend Residue sqr(const Residue& x); - friend Residue power(const Residue& x, const Bignum& y); - friend Residue inverse(const Residue& x); - std::string to_str() const; -}; - -void ResidueRing::init() { - Zero = new Residue(0, *this); - One = new Residue(1, *this); -} - -ResidueRing::~ResidueRing() { - delete Zero; - delete One; - Zero = One = 0; - cnt_assert(!cnt); -} - -const Residue operator+(const Residue& x, const Residue& y) { - x.same_ring(y); - Residue z(x.ring_of()); - bn_assert(BN_mod_add(z.val.bn_ptr(), x.val.bn_ptr(), y.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -const Residue operator-(const Residue& x, const Residue& y) { - x.same_ring(y); - Residue z(x.ring_of()); - bn_assert(BN_mod_sub(z.val.bn_ptr(), x.val.bn_ptr(), y.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -const Residue operator*(const Residue& x, const Residue& y) { - x.same_ring(y); - Residue z(x.ring_of()); - bn_assert(BN_mod_mul(z.val.bn_ptr(), x.val.bn_ptr(), y.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -const Residue operator-(const Residue& x) { - Residue z(x); - z.val.negate(); - 
return z.reduce(); -} - -Residue& Residue::operator+=(const Residue& y) { - same_ring(y); - bn_assert(BN_mod_add(val.bn_ptr(), val.bn_ptr(), y.val.bn_ptr(), modulus().bn_ptr(), get_ctx())); - return *this; -} - -Residue& Residue::operator-=(const Residue& y) { - same_ring(y); - bn_assert(BN_mod_sub(val.bn_ptr(), val.bn_ptr(), y.val.bn_ptr(), modulus().bn_ptr(), get_ctx())); - return *this; -} - -Residue& Residue::operator*=(const Residue& y) { - same_ring(y); - bn_assert(BN_mod_mul(val.bn_ptr(), val.bn_ptr(), y.val.bn_ptr(), modulus().bn_ptr(), get_ctx())); - return *this; -} - -bool operator==(const Residue& x, const Residue& y) { - x.same_ring(y); - return x.extract() == y.extract(); -} - -bool operator!=(const Residue& x, const Residue& y) { - x.same_ring(y); - return x.extract() != y.extract(); -} - -Residue sqr(const Residue& x) { - Residue z(x.ring_of()); - bn_assert(BN_mod_sqr(z.val.bn_ptr(), x.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -Residue power(const Residue& x, const Bignum& y) { - Residue z(x.ring_of()); - bn_assert(BN_mod_exp(z.val.bn_ptr(), x.val.bn_ptr(), y.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -Residue inverse(const Residue& x) { - assert(x.ring_of().is_prime()); - return power(x, x.ring_of().get_modulus() - 2); -} - -const Residue& ResidueRing::img_i() { - if (!Img_i) { - assert(is_prime()); - assert(modulus % 4 == 1); - int g = 2; - Bignum n = (modulus - 1) / 4; - while (true) { - Residue t = power(frac(g), n); - if (t != one() && t != frac(-1)) { - Img_i = new Residue(t); - break; - } - } - } - return *Img_i; -} - -Residue sqrt(const Residue& x) { - assert(x.ring_of().is_prime()); - ResidueRing& R = x.ring_of(); - const Bignum& p = R.get_modulus(); - if (x.is_zero() || !p.odd()) { - return x; - } - if (p[1]) { // p=3 (mod 4) - return power(x, (p + 1) >> 2); - } else if (p[2]) { - // p=5 (mod 8) - Residue t = power(x, (p + 3) >> 3); - return (sqr(t) == x) ? 
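// A short note on the square-root shortcut used in sqrt() here (comment added for clarity):
// for a prime p = 3 (mod 4), sqrt(x) = x^((p+1)/4), since squaring gives x^((p+1)/2) = x * x^((p-1)/2) = x.
// For p = 5 (mod 8) -- the case of p = 2^255 - 19 -- the value t = x^((p+3)/8) satisfies t^2 = +-x,
// so the root is either t or i*t, where i = sqrt(-1) is supplied by ResidueRing::img_i().
// The remaining case p = 1 (mod 8) is not handled by this code (that branch only asserts).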
t : R.img_i() * t; - } else { - assert(p[2]); - return R.zero(); - } -} - -Residue ResidueRing::frac(long num, long denom) { - assert(denom); - if (denom < 0) { - num = -num; - denom = -denom; - } - if (!(num % denom)) { - return Residue(num / denom, *this); - } else { - return Residue(num, *this) * inverse(Residue(denom, *this)); - } -} - -inline Residue ResidueRing::convert(long x) { - return Residue(x, *this); -} - -inline Residue ResidueRing::convert(const Bignum& x) { - return Residue(x, *this); -} - -std::string Residue::to_str() const { - return "Mod(" + val.to_str() + "," + modulus().to_str() + ")"; -} - -std::ostream& operator<<(std::ostream& os, const Residue& x) { - return os << x.to_str(); -} - -std::istream& operator>>(std::istream& is, Residue& x) { - std::string word; - is >> word; - x = dec_string(word); - return is; -} -} // namespace arith - -// ****************************************************** - -namespace ellcurve { -using namespace arith; - -const Bignum& P25519() { - static Bignum P25519 = (Bignum(1) << 255) - 19; - return P25519; -} - -ResidueRing& Fp25519() { - static ResidueRing Fp25519(P25519()); - return Fp25519; -} -} // namespace ellcurve - -// ****************************************************** - -namespace ellcurve { -using namespace arith; - -class MontgomeryCurve { - ResidueRing& ring; - int A_short; // v^2 = u^2 + Au + 1 - int Gu_short; // u(G) - int a_short; // (A+2)/4 - Residue A; - Residue Gu; - Bignum P; - Bignum L; - Bignum Order; - Bignum cofactor; - int cofactor_short; - - void init(); - - public: - MontgomeryCurve(int _A, int _Gu, ResidueRing& _R) - : ring(_R) - , A_short(_A) - , Gu_short(_Gu) - , a_short((_A + 2) / 4) - , A(_A, _R) - , Gu(_Gu, _R) - , P(_R.get_modulus()) - , cofactor_short(0) { - init(); - } - - const Residue& get_gen_u() const { - return Gu; - } - const Bignum& get_ell() const { - return L; - } - const Bignum& get_order() const { - return Order; - } - ResidueRing& get_base_ring() const { - return ring; - } - const Bignum& get_p() const { - return P; - } - - void set_order_cofactor(const Bignum& order, int cof); - - struct PointXZ { - Residue X, Z; - PointXZ(Residue x, Residue z) : X(x), Z(z) { - x.same_ring(z); - } - PointXZ(ResidueRing& r) : X(r.one()), Z(r.zero()) { - } - explicit PointXZ(Residue u) : X(u), Z(u.ring_of().one()) { - } - explicit PointXZ(Residue y, bool) : X(y.ring_of().one() - y), Z(y + y.ring_of().one()) { - } - PointXZ(const PointXZ& P) : X(P.X), Z(P.Z) { - } - PointXZ& operator=(const PointXZ& P) { - X = P.X; - Z = P.Z; - return *this; - } - Residue get_u() const { - return X * inverse(Z); - } - Residue get_v(bool sign_v = false) const; - bool is_infty() const { - return Z.is_zero(); - } - Residue get_y() const { - return (X - Z) * inverse(X + Z); - } - bool export_point_y(unsigned char buffer[32]) const; - bool export_point_u(unsigned char buffer[32]) const; - void zeroize() { - X = Z = Z.ring_of().zero(); - } - }; - - PointXZ power_gen_xz(const Bignum& n) const; - PointXZ power_xz(const Residue& u, const Bignum& n) const; - PointXZ power_xz(const PointXZ& P, const Bignum& n) const; - PointXZ add_xz(const PointXZ& P, const PointXZ& Q) const; - PointXZ double_xz(const PointXZ& P) const; - - PointXZ import_point_u(const unsigned char point[32]) const; - PointXZ import_point_y(const unsigned char point[32]) const; -}; - -void MontgomeryCurve::init() { - assert(!((a_short + 2) & 3) && a_short >= 0); -} - -void MontgomeryCurve::set_order_cofactor(const Bignum& order, int cof) { - assert(order > 0); - 
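// For reference (a sketch, not a claim about code not shown here): the Curve25519/Ed25519 point group
// has size 8 * l, where l = 2^252 + 27742317777372353535851937790883648493 is prime. The hex order
// constant passed to set_order_cofactor() for both curves in this file is exactly 8 * l, so after
// dividing by the cofactor the assert(is_prime(L)) just below is checking l itself.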
assert(cof >= 0); - assert(cof == 0 || (order % cof) == 0); - Order = order; - cofactor = cofactor_short = cof; - if (cof > 0) { - L = order / cof; - assert(is_prime(L)); - } - assert(!power_gen_xz(1).is_infty()); - assert(power_gen_xz(Order).is_infty()); -} - -// computes u(P+Q)*u(P-Q) as X/Z -MontgomeryCurve::PointXZ MontgomeryCurve::add_xz(const MontgomeryCurve::PointXZ& P, - const MontgomeryCurve::PointXZ& Q) const { - Residue u = (P.X + P.Z) * (Q.X - Q.Z); - Residue v = (P.X - P.Z) * (Q.X + Q.Z); - return MontgomeryCurve::PointXZ(sqr(u + v), sqr(u - v)); -} - -// computes u(2P) as X/Z -MontgomeryCurve::PointXZ MontgomeryCurve::double_xz(const MontgomeryCurve::PointXZ& P) const { - Residue u = sqr(P.X + P.Z); - Residue v = sqr(P.X - P.Z); - Residue w = u - v; - return PointXZ(u * v, w * (v + Residue(a_short, ring) * w)); -} - -MontgomeryCurve::PointXZ MontgomeryCurve::power_gen_xz(const Bignum& n) const { - return power_xz(Gu, n); -} - -MontgomeryCurve::PointXZ MontgomeryCurve::power_xz(const Residue& u, const Bignum& n) const { - return power_xz(PointXZ(u), n); -} - -// computes u([n]P) in form X/Z -MontgomeryCurve::PointXZ MontgomeryCurve::power_xz(const PointXZ& A, const Bignum& n) const { - assert(n >= 0); - if (n == 0) { - return PointXZ(ring); - } - - int k = n.num_bits(); - PointXZ P(A); - PointXZ Q(double_xz(P)); - for (int i = k - 2; i >= 0; --i) { - PointXZ PQ(add_xz(P, Q)); - PQ.X *= A.Z; - PQ.Z *= A.X; - if (n[i]) { - P = PQ; - Q = double_xz(Q); - } else { - Q = PQ; - P = double_xz(P); - } - } - return P; -} - -bool MontgomeryCurve::PointXZ::export_point_y(unsigned char buffer[32]) const { - if ((X + Z).is_zero()) { - std::memset(buffer, 0xff, 32); - return false; - } else { - get_y().extract().export_lsb(buffer, 32); - return true; - } -} - -bool MontgomeryCurve::PointXZ::export_point_u(unsigned char buffer[32]) const { - if (Z.is_zero()) { - std::memset(buffer, 0xff, 32); - return false; - } else { - get_u().extract().export_lsb(buffer, 32); - return true; - } -} - -MontgomeryCurve::PointXZ MontgomeryCurve::import_point_u(const unsigned char point[32]) const { - Bignum u; - u.import_lsb(point, 32); - u[255] = 0; - return PointXZ(Residue(u, ring)); -} - -MontgomeryCurve::PointXZ MontgomeryCurve::import_point_y(const unsigned char point[32]) const { - Bignum y; - y.import_lsb(point, 32); - y[255] = 0; - return PointXZ(Residue(y, ring), true); -} - -MontgomeryCurve& Curve25519() { - static MontgomeryCurve Curve25519(486662, 9, Fp25519()); - static bool init = false; - if (!init) { - Curve25519.set_order_cofactor(hex_string{"80000000000000000000000000000000a6f7cef517bce6b2c09318d2e7ae9f68"}, 8); - init = true; - } - return Curve25519; -} -} // namespace ellcurve - -// ****************************************************** - -namespace ellcurve { -using namespace arith; - -class TwEdwardsCurve; - -class TwEdwardsCurve { - public: - struct SegrePoint { - Residue XY, X, Y, Z; // if x=X/Z and y=Y/T, stores (xy,x,y,1)*Z*T - SegrePoint(ResidueRing& R) : XY(R), X(R), Y(R), Z(R) { - } - SegrePoint(const Residue& x, const Residue& y) : XY(x * y), X(x), Y(y), Z(y.ring_of().one()) { - } - SegrePoint(const TwEdwardsCurve& E, const Residue& y, bool x_sign); - SegrePoint(const SegrePoint& P) : XY(P.XY), X(P.X), Y(P.Y), Z(P.Z) { - } - SegrePoint& operator=(const SegrePoint& P) { - XY = P.XY; - X = P.X; - Y = P.Y; - Z = P.Z; - return *this; - } - bool is_zero() const { - return X.is_zero() && (Y == Z); - } - bool is_valid() const { - return (XY * Z == X * Y) && !(XY.is_zero() && X.is_zero() 
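// How the x-only ladder above works (sketch): points are kept projectively as u = X/Z. With
// s = (X1+Z1)(X2-Z2) and t = (X1-Z1)(X2+Z2), one gets u(P+Q)*u(P-Q) = (s+t)^2 / (s-t)^2, which is
// exactly what add_xz() returns; power_xz() then multiplies X by Z(P-Q) and Z by X(P-Q), so only the
// fixed difference P-Q = A ever has to be known. This is the classic Montgomery differential-addition
// ladder: no y-coordinates and no inversions until the very end.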
&& Y.is_zero() && Z.is_zero()); - } - bool is_finite() const { - return !Z.is_zero(); - } - bool is_normalized() const { - return Z == Z.ring_of().one(); - } - SegrePoint& normalize() { - auto f = inverse(Z); - XY *= f; - X *= f; - Y *= f; - Z = Z.ring_of().one(); - return *this; - } - SegrePoint& zeroize() { - XY = X = Y = Z = Z.ring_of().zero(); - return *this; - } - bool export_point(unsigned char buffer[32], bool need_x = true) const; - bool export_point_y(unsigned char buffer[32]) const { - return export_point(buffer, false); - } - bool export_point_u(unsigned char buffer[32]) const; - Residue get_y() const { - return Y * inverse(Z); - } - Residue get_x() const { - return X * inverse(Z); - } - Residue get_u() const { - return (Z + Y) * inverse(Z - Y); - } - void negate() { - XY.negate(); - X.negate(); - } - }; - - private: - ResidueRing& ring; - Residue D; - Residue D2; - Residue Gy; - Bignum P; - Bignum L; - Bignum Order; - Bignum cofactor; - int cofactor_short; - SegrePoint G; - SegrePoint O; - void init(); - - public: - TwEdwardsCurve(const Residue& _D, const Residue& _Gy, ResidueRing& _R) - : ring(_R), D(_D), D2(_D + _D), Gy(_Gy), P(_R.get_modulus()), cofactor_short(0), G(_R), O(_R) { - init(); - } - - const Residue& get_gen_y() const { - return Gy; - } - const Bignum& get_ell() const { - return L; - } - const Bignum& get_order() const { - return Order; - } - ResidueRing& get_base_ring() const { - return ring; - } - const Bignum& get_p() const { - return P; - } - const SegrePoint& get_base_point() const { - return G; - } - - void set_order_cofactor(const Bignum& order, int cof); - bool recover_x(Residue& x, const Residue& y, bool x_sign) const; - - void add_points(SegrePoint& R, const SegrePoint& P, const SegrePoint& Q) const; - SegrePoint add_points(const SegrePoint& P, const SegrePoint& Q) const; - void double_point(SegrePoint& R, const SegrePoint& P) const; - SegrePoint double_point(const SegrePoint& P) const; - SegrePoint power_point(const SegrePoint& A, const Bignum& n) const; - SegrePoint power_gen(const Bignum& n) const; - - SegrePoint import_point(const unsigned char point[32], bool& ok) const; -}; - -std::ostream& operator<<(std::ostream& os, const TwEdwardsCurve::SegrePoint& P) { - return os << "[" << P.XY << ":" << P.X << ":" << P.Y << ":" << P.Z << "]"; -} - -void TwEdwardsCurve::init() { - assert(D != ring.zero() && D != ring.convert(-1)); - O.X = O.Z = ring.one(); - G = SegrePoint(*this, Gy, 0); - assert(!G.XY.is_zero()); -} - -void TwEdwardsCurve::set_order_cofactor(const Bignum& order, int cof) { - assert(order > 0); - assert(cof >= 0); - assert(cof == 0 || (order % cof) == 0); - Order = order; - cofactor = cofactor_short = cof; - if (cof > 0) { - L = order / cof; - assert(is_prime(L)); - assert(!power_gen(1).is_zero()); - assert(power_gen(L).is_zero()); - } -} - -TwEdwardsCurve::SegrePoint::SegrePoint(const TwEdwardsCurve& E, const Residue& y, bool x_sign) - : XY(y), X(E.get_base_ring()), Y(y), Z(E.get_base_ring().one()) { - Residue x(y.ring_of()); - if (E.recover_x(x, y, x_sign)) { - XY *= x; - X = x; - } else { - XY = Y = Z = E.get_base_ring().zero(); - } -} - -bool TwEdwardsCurve::recover_x(Residue& x, const Residue& y, bool x_sign) const { - // recovers x from equation -x^2+y^2 = 1+d*x^2*y^2 - Residue z = inverse(ring.one() + D * sqr(y)); - if (z.is_zero()) { - return false; - } - z *= sqr(y) - ring.one(); - Residue t = sqrt(z); - if (sqr(t) == z) { - x = (t.extract().odd() == x_sign) ? 
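// Derivation behind recover_x() below (sketch): from the curve equation -x^2 + y^2 = 1 + d*x^2*y^2
// one solves x^2 = (y^2 - 1) / (1 + d*y^2). The code computes that quotient z, takes t = sqrt(z),
// accepts it only if t^2 == z (i.e. z is a quadratic residue), and then picks whichever of t / -t
// has the parity requested by the x_sign bit (bit 255 of the encoded point in import_point()).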
t : -t; - //std::cout << "x=" << x << ", y=" << y << std::endl; - return true; - } else { - return false; - } -} - -void TwEdwardsCurve::add_points(SegrePoint& Res, const SegrePoint& P, const SegrePoint& Q) const { - Residue a((P.X + P.Y) * (Q.X + Q.Y)); - Residue b((P.X - P.Y) * (Q.X - Q.Y)); - Residue c(P.Z * Q.Z * ring.convert(2)); - Residue d(P.XY * Q.XY * D2); - Residue x_num(a - b); // 2(x1y2+x2y1) - Residue y_num(a + b); // 2(x1x2+y1y2) - Residue x_den(c + d); // 2(1+dx1x2y1y2) - Residue y_den(c - d); // 2(1-dx1x2y1y2) - Res.X = x_num * y_den; // x = x_num/x_den, y = y_num/y_den - Res.Y = y_num * x_den; - Res.XY = x_num * y_num; - Res.Z = x_den * y_den; -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::add_points(const SegrePoint& P, const SegrePoint& Q) const { - SegrePoint Res(ring); - add_points(Res, P, Q); - return Res; -} - -void TwEdwardsCurve::double_point(SegrePoint& Res, const SegrePoint& P) const { - add_points(Res, P, P); -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::double_point(const SegrePoint& P) const { - SegrePoint Res(ring); - double_point(Res, P); - return Res; -} - -// computes u([n]P) in form (xy,x,y,1)*Z -TwEdwardsCurve::SegrePoint TwEdwardsCurve::power_point(const SegrePoint& A, const Bignum& n) const { - assert(n >= 0); - if (n == 0) { - return O; - } - - int k = n.num_bits(); - SegrePoint P(A); - SegrePoint Q(double_point(A)); - for (int i = k - 2; i >= 0; --i) { - if (n[i]) { - add_points(P, P, Q); - double_point(Q, Q); - } else { - // we do more operations than necessary for uniformicity - add_points(Q, P, Q); - double_point(P, P); - } - } - return P; -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::power_gen(const Bignum& n) const { - return power_point(G, n); -} - -bool TwEdwardsCurve::SegrePoint::export_point(unsigned char buffer[32], bool need_x) const { - if (!is_normalized()) { - if (Z.is_zero()) { - std::memset(buffer, 0xff, 32); - return false; - } - Residue f(inverse(Z)); - Bignum y((Y * f).extract()); - assert(!y[255]); - if (need_x) { - y[255] = (X * f).extract().odd(); - } - y.export_lsb(buffer, 32); - } else { - Bignum y(Y.extract()); - assert(!y[255]); - if (need_x) { - y[255] = X.extract().odd(); - } - y.export_lsb(buffer, 32); - } - return true; -} - -bool TwEdwardsCurve::SegrePoint::export_point_u(unsigned char buffer[32]) const { - if (Z == Y) { - std::memset(buffer, 0xff, 32); - return false; - } - Residue f(inverse(Z - Y)); - ((Z + Y) * f).extract().export_lsb(buffer, 32); - assert(!(buffer[31] & 0x80)); - return true; -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::import_point(const unsigned char point[32], bool& ok) const { - Bignum y; - y.import_lsb(point, 32); - bool x_sign = y[255]; - y[255] = 0; - Residue yr(y, ring); - Residue xr(ring); - ok = recover_x(xr, yr, x_sign); - return ok ? 
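// The addition law implemented by add_points() above, written affinely for this a = -1 twisted
// Edwards curve:
//   x3 = (x1*y2 + x2*y1) / (1 + d*x1*x2*y1*y2)
//   y3 = (y1*y2 + x1*x2) / (1 - d*x1*x2*y1*y2)
// With a = (x1+y1)(x2+y2) and b = (x1-y1)(x2-y2) as in the code, a - b = 2(x1*y2 + x2*y1) and
// a + b = 2(x1*x2 + y1*y2), which is how x_num, y_num, x_den and y_den are formed (the common
// factor 2 cancels between numerators and denominators).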
SegrePoint(xr, yr) : SegrePoint(ring);
-}
-
-TwEdwardsCurve& Ed25519() {
-  static TwEdwardsCurve Ed25519(Fp25519().frac(-121665, 121666), Fp25519().frac(4, 5), Fp25519());
-  static bool init = false;
-  if (!init) {
-    Ed25519.set_order_cofactor(hex_string{"80000000000000000000000000000000a6f7cef517bce6b2c09318d2e7ae9f68"}, 8);
-    init = true;
-  }
-  return Ed25519;
-}
-}  // namespace ellcurve
-
-// ******************************************************
-
-namespace openssl {
-#include <openssl/evp.h>
-}
-
-namespace digest {
-using namespace openssl;
-
-struct OpensslEVP_SHA1 {
-  enum { digest_bytes = 20 };
-  static const EVP_MD* get_evp() {
-    return EVP_sha1();
-  }
-};
-
-struct OpensslEVP_SHA256 {
-  enum { digest_bytes = 32 };
-  static const EVP_MD* get_evp() {
-    return EVP_sha256();
-  }
-};
-
-struct OpensslEVP_SHA512 {
-  enum { digest_bytes = 64 };
-  static const EVP_MD* get_evp() {
-    return EVP_sha512();
-  }
-};
-
-template <typename H>
-class HashCtx {
-  EVP_MD_CTX ctx;
-  void init();
-  void clear();
-
- public:
-  enum { digest_bytes = H::digest_bytes };
-  HashCtx() {
-    init();
-  }
-  HashCtx(const void* data, std::size_t len) {
-    init();
-    feed(data, len);
-  }
-  ~HashCtx() {
-    clear();
-  }
-  void feed(const void* data, std::size_t len);
-  std::size_t extract(unsigned char buffer[digest_bytes]);
-  std::string extract();
-};
-
-template <typename H>
-void HashCtx<H>::init() {
-  EVP_MD_CTX_init(&ctx);
-  EVP_DigestInit_ex(&ctx, H::get_evp(), 0);
-}
-
-template <typename H>
-void HashCtx<H>::clear() {
-  EVP_MD_CTX_cleanup(&ctx);
-}
-
-template <typename H>
-void HashCtx<H>::feed(const void* data, std::size_t len) {
-  EVP_DigestUpdate(&ctx, data, len);
-}
-
-template <typename H>
-std::size_t HashCtx<H>::extract(unsigned char buffer[digest_bytes]) {
-  unsigned olen = 0;
-  EVP_DigestFinal_ex(&ctx, buffer, &olen);
-  assert(olen == digest_bytes);
-  return olen;
-}
-
-template <typename H>
-std::string HashCtx<H>::extract() {
-  unsigned char buffer[digest_bytes];
-  unsigned olen = 0;
-  EVP_DigestFinal_ex(&ctx, buffer, &olen);
-  assert(olen == digest_bytes);
-  return std::string((char*)buffer, olen);
-}
-
-typedef HashCtx<OpensslEVP_SHA1> SHA1;
-typedef HashCtx<OpensslEVP_SHA256> SHA256;
-typedef HashCtx<OpensslEVP_SHA512> SHA512;
-
-template <typename T>
-std::size_t hash_str(unsigned char buffer[T::digest_bytes], const void* data, std::size_t size) {
-  T hasher(data, size);
-  return hasher.extract(buffer);
-}
-
-template <typename T>
-std::size_t hash_two_str(unsigned char buffer[T::digest_bytes], const void* data1, std::size_t size1, const void* data2,
-                         std::size_t size2) {
-  T hasher(data1, size1);
-  hasher.feed(data2, size2);
-  return hasher.extract(buffer);
-}
-
-template <typename T>
-std::string hash_str(const void* data, std::size_t size) {
-  T hasher(data, size);
-  return hasher.extract();
-}
-
-template <typename T>
-std::string hash_two_str(const void* data1, std::size_t size1, const void* data2, std::size_t size2) {
-  T hasher(data1, size1);
-  hasher.feed(data2, size2);
-  return hasher.extract();
-}
-}  // namespace digest
-
-// ******************************************************
-
-namespace openssl {
-#include <openssl/rand.h>
-}
-
-#include <fcntl.h>
-#include <unistd.h>
-
-namespace prng {
-
-int os_get_random_bytes(void* buf, int n) {
-  using namespace std;
-  int r = 0, h = open("/dev/random", O_RDONLY | O_NONBLOCK);
-  if (h >= 0) {
-    r = read(h, buf, n);
-    if (r > 0) {
-      //std::cerr << "added " << r << " bytes of real entropy to secure random numbers seed" << std::endl;
-    } else {
-      r = 0;
-    }
-    close(h);
-  }
-
-  if (r < n) {
-    h = open("/dev/urandom", O_RDONLY);
-    if (h < 0) {
-      return r;
-    }
-    int s = read(h, (char*)buf + r, n - r);
-    close(h);
-    if (s < 0) {
-      return r;
-    }
-    r += s;
-  }
-
-  if (r >= 8) {
-    *(long*)buf ^=
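// Usage sketch for the digest wrappers above (illustrative only; the variable names are examples):
//   unsigned char h[digest::SHA512::digest_bytes];
//   digest::hash_two_str<digest::SHA512>(h, key, 32, msg, msg_len);  // h = SHA-512(key || msg)
// i.e. hash_two_str simply feeds both buffers into a single EVP context before finalizing it.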
lrand48(); - srand48(*(long*)buf); - } - - return r; -} -} // namespace prng - -namespace prng { -using namespace openssl; - -class RandomGen { - public: - struct rand_error {}; - void randomize(bool force = true); - void seed_add(const void* data, std::size_t size, double entropy = 0); - bool ok() const { - return RAND_status(); - } - RandomGen() { - randomize(false); - } - RandomGen(const void* seed, std::size_t size) { - seed_add(seed, size); - randomize(false); - } - bool rand_bytes(void* data, std::size_t size, bool strong = false); - bool strong_rand_bytes(void* data, std::size_t size) { - return rand_bytes(data, size, true); - } - template - bool rand_obj(T& obj) { - return rand_bytes(&obj, sizeof(T)); - } - template - bool rand_objs(T* ptr, std::size_t count) { - return rand_bytes(ptr, sizeof(T) * count); - } - std::string rand_string(std::size_t size, bool strong = false); -}; - -void RandomGen::seed_add(const void* data, std::size_t size, double entropy) { - RAND_add(data, size, entropy > 0 ? entropy : size); -} - -void RandomGen::randomize(bool force) { - if (!force && ok()) { - return; - } - unsigned char buffer[128]; - int n = os_get_random_bytes(buffer, 128); - seed_add(buffer, n); - assert(ok()); -} - -bool RandomGen::rand_bytes(void* data, std::size_t size, bool strong) { - int res = (strong ? RAND_bytes : RAND_pseudo_bytes)((unsigned char*)data, size); - if (res != 0 && res != 1) { - throw rand_error(); - } - return res; -} - -std::string RandomGen::rand_string(std::size_t size, bool strong) { - char buffer[size]; - if (!rand_bytes(buffer, size, strong)) { - throw rand_error(); - } - return std::string(buffer, size); -} - -RandomGen& rand_gen() { - static RandomGen MainPRNG; - return MainPRNG; -} - -} // namespace prng - -// ****************************************************** - -namespace crypto { -namespace Ed25519 { - -const int privkey_bytes = 32; -const int pubkey_bytes = 32; -const int sign_bytes = 64; -const int shared_secret_bytes = 32; - -bool all_bytes_same(const unsigned char* str, std::size_t size) { - unsigned char c = str[0]; - for (std::size_t i = 0; i < size; i++) { - if (str[i] != c) { - return false; - } - } - return true; -} - -class PublicKey { - enum { pk_empty, pk_xz, pk_init } inited; - unsigned char pubkey[pubkey_bytes]; - ellcurve::TwEdwardsCurve::SegrePoint PubKey; - ellcurve::MontgomeryCurve::PointXZ PubKey_xz; - - public: - PublicKey() : inited(pk_empty), PubKey(ellcurve::Fp25519()), PubKey_xz(ellcurve::Fp25519()) { - } - PublicKey(const unsigned char pub_key[pubkey_bytes]); - PublicKey(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key); - - bool import_public_key(const unsigned char pub_key[pubkey_bytes]); - bool import_public_key(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key); - bool export_public_key(unsigned char pubkey_buffer[pubkey_bytes]) const; - bool check_message_signature(unsigned char signature[sign_bytes], const unsigned char* message, std::size_t msg_size); - - void clear(); - bool ok() const { - return inited == pk_init; - } - - const unsigned char* get_pubkey_ptr() const { - return inited == pk_init ? 
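// Why import_public_key() below sets PubKey_xz to (Z + Y, Z - Y): the Edwards y-coordinate and the
// Montgomery u-coordinate of the same point are related by u = (1 + y) / (1 - y) (and back via
// y = (u - 1) / (u + 1)), so keeping the pair (Z + Y, Z - Y) gives the projective X/Z form expected
// by the Curve25519 ladder without any field inversion. Signatures then use the Edwards form while
// shared-secret computation runs on the Montgomery form of the same key.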
pubkey : 0; - } - const ellcurve::TwEdwardsCurve::SegrePoint& get_point() const { - return PubKey; - } - const ellcurve::MontgomeryCurve::PointXZ& get_point_xz() const { - return PubKey_xz; - } -}; - -void PublicKey::clear(void) { - if (inited != pk_empty) { - std::memset(pubkey, 0, pubkey_bytes); - PubKey.zeroize(); - PubKey_xz.zeroize(); - } - inited = pk_empty; -} - -PublicKey::PublicKey(const unsigned char pub_key[pubkey_bytes]) - : inited(pk_empty), PubKey(ellcurve::Fp25519()), PubKey_xz(ellcurve::Fp25519()) { - import_public_key(pub_key); -} - -PublicKey::PublicKey(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key) - : inited(pk_empty), PubKey(ellcurve::Fp25519()), PubKey_xz(ellcurve::Fp25519()) { - import_public_key(Pub_Key); -} - -bool PublicKey::import_public_key(const unsigned char pub_key[pubkey_bytes]) { - clear(); - if (all_bytes_same(pub_key, pubkey_bytes)) { - return false; - } - bool ok = false; - PubKey = ellcurve::Ed25519().import_point(pub_key, ok); - if (!ok) { - clear(); - return false; - } - std::memcpy(pubkey, pub_key, pubkey_bytes); - PubKey_xz.X = PubKey.Z + PubKey.Y; - PubKey_xz.Z = PubKey.Z - PubKey.Y; - inited = pk_init; - return true; -} - -bool PublicKey::import_public_key(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key) { - clear(); - if (!Pub_Key.is_valid()) { - return false; - } - PubKey = Pub_Key; - PubKey_xz.X = PubKey.Z + PubKey.Y; - PubKey_xz.Z = PubKey.Z - PubKey.Y; - inited = pk_init; - - if (!PubKey.export_point(pubkey)) { - clear(); - return false; - } - return true; -} - -bool PublicKey::export_public_key(unsigned char pubkey_buffer[pubkey_bytes]) const { - if (inited != pk_init) { - std::memset(pubkey_buffer, 0, pubkey_bytes); - return false; - } else { - std::memcpy(pubkey_buffer, pubkey, pubkey_bytes); - return true; - } -} - -bool PublicKey::check_message_signature(unsigned char signature[sign_bytes], const unsigned char* message, - std::size_t msg_size) { - if (inited != pk_init) { - return false; - } - unsigned char hash[64]; - { - digest::SHA512 hasher(signature, 32); - hasher.feed(pubkey, 32); - hasher.feed(message, msg_size); - hasher.extract(hash); - } - auto& E = ellcurve::Ed25519(); - const arith::Bignum& L = E.get_ell(); - arith::Bignum H, S; - S.import_lsb(signature + 32, 32); - H.import_lsb(hash, 64); - H %= L; - H = L - H; - auto sG = E.power_gen(S); - auto hA = E.power_point(PubKey, H); - auto pR1 = E.add_points(sG, hA); - unsigned char pR1_bytes[32]; - if (!pR1.export_point(pR1_bytes)) { - return false; - } - return !std::memcmp(pR1_bytes, signature, 32); -} - -class PrivateKey { - public: - struct priv_key_no_copy {}; - PrivateKey() : inited(false) { - std::memset(privkey, 0, privkey_bytes); - } - PrivateKey(const unsigned char pk[privkey_bytes]) : inited(false) { - std::memset(privkey, 0, privkey_bytes); - import_private_key(pk); - } - ~PrivateKey() { - clear(); - } - bool random_private_key(bool strong = false); - bool import_private_key(const unsigned char pk[privkey_bytes]); - bool export_private_key(unsigned char pk[privkey_bytes]) const; // careful! 
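// What check_message_signature() above verifies (sketch): a signature is (R, S), with R the first
// 32 bytes and S the last 32. With k = SHA-512(R || A || M) mod l, the code computes
// [S]G + [l - k]A and compares its encoding against R, which is the usual Ed25519 check
// [S]G = R + [k]A rearranged so that no point negation or subtraction is needed.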
- bool export_public_key(unsigned char pubk[pubkey_bytes]) const { - return PubKey.export_public_key(pubk); - } - void clear(); - bool ok() const { - return inited; - } - - // used for EdDSA (sign) - bool sign_message(unsigned char signature[sign_bytes], const unsigned char* message, std::size_t msg_size); - // used for ECDH (encrypt / decrypt) - bool compute_shared_secret(unsigned char secret[shared_secret_bytes], const PublicKey& Pub); - // used for EC asymmetric decryption - bool compute_temp_shared_secret(unsigned char secret[shared_secret_bytes], - const unsigned char temp_pub_key[pubkey_bytes]); - - const PublicKey& get_public_key() const { - return PubKey; - } - - private: - bool inited; - unsigned char privkey[privkey_bytes]; - unsigned char priv_salt[32]; - arith::Bignum priv_exp; - PublicKey PubKey; - - bool process_private_key(); - PrivateKey(const PrivateKey&) { - throw priv_key_no_copy(); - } - PrivateKey& operator=(const PrivateKey&) { - throw priv_key_no_copy(); - } -}; - -bool PrivateKey::random_private_key(bool strong) { - inited = false; - if (!prng::rand_gen().rand_bytes(privkey, privkey_bytes, strong)) { - clear(); - return false; - } - return process_private_key(); -} - -void PrivateKey::clear(void) { - std::memset(privkey, 0, privkey_bytes); - std::memset(priv_salt, 0, sizeof(priv_salt)); - priv_exp.clear(); - PubKey.clear(); - inited = false; -} - -bool PrivateKey::import_private_key(const unsigned char pk[privkey_bytes]) { - clear(); - if (all_bytes_same(pk, privkey_bytes)) { - return false; - } - std::memcpy(privkey, pk, privkey_bytes); - return process_private_key(); -} - -bool PrivateKey::export_private_key(unsigned char pk[privkey_bytes]) const { // careful! - if (!inited) { - std::memset(pk, 0, privkey_bytes); - return false; - } else { - std::memcpy(pk, privkey, privkey_bytes); - return true; - } -} - -bool PrivateKey::process_private_key() { - unsigned char buff[64]; - digest::hash_str(buff, privkey, privkey_bytes); - std::memcpy(priv_salt, buff + 32, 32); - buff[0] &= -8; - buff[31] = ((buff[31] | 0x40) & ~0x80); - priv_exp.import_lsb(buff, 32); - PubKey = ellcurve::Ed25519().power_gen(priv_exp); - inited = PubKey.ok(); - if (!inited) { - clear(); - } - return inited; -} - -bool PrivateKey::compute_shared_secret(unsigned char secret[shared_secret_bytes], const PublicKey& Pub) { - if (!inited || !Pub.ok()) { - std::memset(secret, 0, shared_secret_bytes); - *(long*)secret = lrand48(); - return false; - } - auto P = ellcurve::Curve25519().power_xz(Pub.get_point_xz(), priv_exp); - if (P.is_infty()) { - std::memset(secret, 0, shared_secret_bytes); - *(long*)secret = lrand48(); - return false; - } - P.export_point_y(secret); - return true; -} - -bool PrivateKey::compute_temp_shared_secret(unsigned char secret[shared_secret_bytes], - const unsigned char temp_pub_key[pubkey_bytes]) { - PublicKey tempPubkey(temp_pub_key); - if (!tempPubkey.ok()) { - return false; - } - return compute_shared_secret(secret, tempPubkey); -} - -bool PrivateKey::sign_message(unsigned char signature[sign_bytes], const unsigned char* message, std::size_t msg_size) { - if (!inited) { - std::memset(signature, 0, sign_bytes); - return false; - } - unsigned char r_bytes[64]; - digest::hash_two_str(r_bytes, priv_salt, 32, message, msg_size); - const arith::Bignum& L = ellcurve::Ed25519().get_ell(); - arith::Bignum eR; - eR.import_lsb(r_bytes, 64); - eR %= L; - - auto pR = ellcurve::Ed25519().power_gen(eR); - - assert(pR.export_point(signature, 32)); - { - digest::SHA512 hasher(signature, 32); - 
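// (sketch) At this point signature[0..31] already holds R = [r]G, with r = SHA-512(priv_salt || M) mod l.
// The hash started here is k = SHA-512(R || A || M) mod l, and the code below forms
// S = (k * a + r) mod l, where a is the clamped scalar produced in process_private_key()
// (buff[0] &= -8; buff[31] = (buff[31] | 0x40) & ~0x80). The pair (R, S) is the finished signature.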
hasher.feed(PubKey.get_pubkey_ptr(), 32); - hasher.feed(message, msg_size); - hasher.extract(r_bytes); - } - arith::Bignum S; - S.import_lsb(r_bytes, 64); - S %= L; - S *= priv_exp; - S += eR; - S %= L; - S.export_lsb(signature + 32, 32); - return true; -} - -// use one TempKeyGenerator object a lot of times -class TempKeyGenerator { - enum { salt_size = 64 }; - unsigned char random_salt[salt_size]; - unsigned char buffer[privkey_bytes]; - - public: - TempKeyGenerator() { - prng::rand_gen().strong_rand_bytes(random_salt, salt_size); - } - ~TempKeyGenerator() { - std::memset(random_salt, 0, salt_size); - std::memset(buffer, 0, privkey_bytes); - } - - unsigned char* get_temp_private_key(unsigned char* to, const unsigned char* message, std::size_t size, - const unsigned char* rand = 0, std::size_t rand_size = 0); // rand may be 0 - void create_temp_private_key(PrivateKey& pk, const unsigned char* message, std::size_t size, - const unsigned char* rand = 0, std::size_t rand_size = 0); - - // sets temp_pub_key and shared_secret for one-time asymmetric encryption of message - bool create_temp_shared_secret(unsigned char temp_pub_key[pubkey_bytes], unsigned char secret[shared_secret_bytes], - const PublicKey& recipientPubKey, const unsigned char* message, std::size_t size, - const unsigned char* rand = 0, std::size_t rand_size = 0); -}; - -unsigned char* TempKeyGenerator::get_temp_private_key(unsigned char* to, const unsigned char* message, std::size_t size, - const unsigned char* rand, - std::size_t rand_size) { // rand may be 0 - digest::SHA256 hasher(message, size); - hasher.feed(random_salt, salt_size); - if (rand && rand_size) { - hasher.feed(rand, rand_size); - } - if (!to) { - to = buffer; - } - hasher.extract(to); - //++ *((long *)random_salt); - return to; -} - -void TempKeyGenerator::create_temp_private_key(PrivateKey& pk, const unsigned char* message, std::size_t size, - const unsigned char* rand, std::size_t rand_size) { - pk.import_private_key(get_temp_private_key(buffer, message, size, rand, rand_size)); - std::memset(buffer, 0, privkey_bytes); -} - -bool TempKeyGenerator::create_temp_shared_secret(unsigned char temp_pub_key[pubkey_bytes], - unsigned char shared_secret[shared_secret_bytes], - const PublicKey& recipientPubKey, const unsigned char* message, - std::size_t size, const unsigned char* rand, std::size_t rand_size) { - PrivateKey tmpPk; - create_temp_private_key(tmpPk, message, size, rand, rand_size); - return tmpPk.export_public_key(temp_pub_key) && tmpPk.compute_shared_secret(shared_secret, recipientPubKey); -} - -} // namespace Ed25519 -} // namespace crypto - -// ****************************************************** - -void print_buffer(const unsigned char buffer[32]) { - for (int i = 0; i < 32; i++) { - char buff[4]; - sprintf(buff, "%02x", buffer[i]); - std::cout << buff; - } -} - -std::string buffer_to_hex(const unsigned char* buffer, std::size_t size = 32) { - char str[2 * size + 1]; - for (std::size_t i = 0; i < size; i++) { - sprintf(str + 2 * i, "%02x", buffer[i]); - } - return str; -} - -int main(void) { - arith::Bignum x = (3506824292LL << 31); - x = (2948877059LL << 31); - arith::Bignum L = (((36 * x + 36) * x + 18) * x + 6) * x + 1; - arith::Bignum P = L + 6 * sqr(x); - std::cout << "x= " << x << "; l= " << L << "; p= " << P << std::endl; - std::cout << "x= " << x.to_hex() << "; l= " << L.to_hex() << "; p= " << P.to_hex() << std::endl; - std::cout << "x mod 3=" << x % 3 << "; p mod 9=" << P % 9 << "; x/2^31=" << (x >> 31).to_hex() << "=" << (x >> 31) - << 
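// (sketch) TempKeyGenerator above provides one-shot asymmetric encryption keys: it derives an
// ephemeral private key as SHA-256(message || random_salt [|| rand]), publishes the matching
// public key, and runs the Curve25519 shared-secret computation against the recipient's key,
// so sender and recipient obtain the same 32-byte secret without any long-term sender key.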
std::endl; - - crypto::Ed25519::PrivateKey PK1, PK2, PK3; - PK1.random_private_key(); - PK2.random_private_key(); - unsigned char priv2_export[32]; - bool ok = PK2.export_private_key(priv2_export); - std::cout << "PK2 = " << ok << " " << buffer_to_hex(priv2_export) << std::endl; - PK3.import_private_key(priv2_export); - std::cout << "PK3 = " << PK3.ok() << std::endl; - - unsigned char pub_export[32]; - ok = PK1.export_public_key(pub_export); - std::cout << "PubK1 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - crypto::Ed25519::PublicKey PubK1(pub_export); - ok = PK2.export_public_key(pub_export); - std::cout << "PubK2 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - crypto::Ed25519::PublicKey PubK2(pub_export); - ok = PK3.export_public_key(pub_export); - std::cout << "PubK3 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - crypto::Ed25519::PublicKey PubK3(pub_export); - ok = PubK1.export_public_key(pub_export); - std::cout << "PubK1 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - - unsigned char secret12[32], secret21[32]; - ok = PK1.compute_shared_secret(secret12, PK3.get_public_key()); - std::cout << "secret(PK1,PubK2)=" << ok << " " << buffer_to_hex(secret12) << std::endl; - ok = PK2.compute_shared_secret(secret21, PubK1); - std::cout << "secret(PK2,PubK1)=" << ok << " " << buffer_to_hex(secret21) << std::endl; - - unsigned char signature[64]; - ok = PK1.sign_message(signature, (const unsigned char*)"abc", 3); - std::cout << "PK1.signature=" << ok << " " << buffer_to_hex(signature) << std::endl; - - // signature[63] ^= 1; - ok = PubK1.check_message_signature(signature, (const unsigned char*)"abc", 3); - std::cout << "PubK1.check_signature=" << ok << std::endl; - - unsigned char temp_pubkey[32]; - crypto::Ed25519::TempKeyGenerator TKG; // use one generator a lot of times - - TKG.create_temp_shared_secret(temp_pubkey, secret12, PubK1, (const unsigned char*)"abc", 3); - std::cout << "secret12=" << buffer_to_hex(secret12) << "; temp_pubkey=" << buffer_to_hex(temp_pubkey) << std::endl; - - PK1.compute_temp_shared_secret(secret21, temp_pubkey); - std::cout << "secret21=" << buffer_to_hex(secret21) << std::endl; -} diff --git a/test/regression-tests.ans b/test/regression-tests.ans index e2057210..0e07e3d6 100644 --- a/test/regression-tests.ans +++ b/test/regression-tests.ans @@ -13,7 +13,7 @@ Test_Fift_test_bls_ops_default fb0a81d4f247ab806318b051d12158f7f1aacc5513db5f8cb Test_Fift_test_deep_stack_ops_default df812efbadfffa8a3f553416f68c8c4435bac07266f84562cf98fe5f0dd62a52 Test_Fift_test_default 4e44b3382963ec89f7b5c8f2ebd85da3bc8aebad5b49f5b11b14075061477b4d Test_Fift_test_dict_default a9c8cbcfdece5573185022cea07f59f1bc404e5d879e5157a5745757f8ee0525 -Test_Fift_test_disasm_default dacaa555f5f217b2373e01e3bcd59634e480f5759dcc43edbdef35273ae38492 +Test_Fift_test_disasm_default 412cf37d37c5d9d81f44dbf4e3d3e7cda173c23b890614eb8a3bc5f2b92f13e6 Test_Fift_test_fiftext_default 2b0db5d4d4bfbc705b959cc787540d7b3a21a71469eac54756e76953f0d9afca Test_Fift_test_fixed_default 278a19d56b773102caf5c1fe2997ea6c8d0d9e720eff8503feede6398a197eec Test_Fift_test_hash_ext_default 686fc5680feca5b3bb207768215b27f6872a95128762dee0d7f2c88bc492d62d @@ -37,40 +37,40 @@ Test_Fift_testvm8_default 17c9e2205ccecfd8549328b4a501d07dde0336899a7a496e747e10 Test_Fift_testvm_default ee4cbfec76c050b6de7877cfc39817d594cd1e175b6265b76fb642e30b940437 Test_Fift_testvmprog_default e5d0b2c68ee568280877c8495be558bfd0054ca5d99a99eebb525bbeca8a65af Test_RefInt_main_default 
768493e0aef8e09a401a6d369edd1ef503a9215fb09dc460f52b27a8bde767cb -Test_VM_assert_code_not_null_default 05bc07e129181c972b976442f200de9487dee8bfb5ac53dd36ff61c5d4d4291d -Test_VM_assert_extract_minmax_key_default c352309c61bdf62ba7a0ba7280d303c88b0696fe7efa550c05feb2c662275297 -Test_VM_assert_lookup_prefix_default c5b45999b46d324e4008c07e5ce671bbcd833f4e15fb21a4a5136f7b980ca6fc -Test_VM_assert_pfx_dict_lookup_default fa6e3f96b31cf2ed9a9dac6b279ec05acfedf13b8ed7b815789f167d1ed7352f +Test_VM_assert_code_not_null_default 09f75cb845e0df27f3ec92405ccb4018484711a79813fd47fe8e158762c1cb93 +Test_VM_assert_extract_minmax_key_default 756337c2b2ce489243956a6608d6934ba9f76124a9435f045fc3a3b65c113d41 +Test_VM_assert_lookup_prefix_default f7683f9d2010bca53b1ef20c0e82427fb04ed62fa5fea1ee986f005ecfc9a27a +Test_VM_assert_pfx_dict_lookup_default 6d7c80d94dbc6d3ae4bafa216667b95ede4f2cbd44a59384abace84270417ef8 Test_VM_bigint_default feeb473a4ac51133989e1c145d0f49defa77117d2ae8b66bd7d12e3579e91b9f -Test_VM_bug_div_short_any_default f69aca6873f75d53dd37b6952151a2d858407a04589330762827dbc96d8b7c04 -Test_VM_bug_exec_dict_getnear_default db314c2e25b49c1f7f044d271e225f36da546c66242a8ab12f6afae37628a81e -Test_VM_bug_stack_overflow_default 7e0e3e96ca438ac96648d569c55213aa82154cf004e80265b1c481b1c4219719 -Test_VM_infinity_loop_1_default 670beda76229922806805e558d50d8f320017c642c3e7e34a7e1f2b7edb83cee -Test_VM_infinity_loop_2_default 22d9bd8cb41ff7b6cced5825e4ab73275b2fc07b1e3cd4588de815e2e6df2963 -Test_VM_memory_leak_default e10dc118f3538720a16bcbd39be9a68c3ea07f76b3d2ed5719a5e866d91f0ab3 -Test_VM_memory_leak_new_default fd2eec0a1d5ae49fb5ff8ba4b938fd9d0fe330be4a07b2b8be12bab249b00d90 -Test_VM_memory_leak_old_default f3076ae38d14000c021597b824d2f0e51de4f00601429ec3e23cca1b32dba844 -Test_VM_oom_1_default 90862ddf3270840fbc9263c003c628ddd4a8bf6548b9bd3d53eb35a5c34bc325 -Test_VM_report3_1_default 7bc6a8e66f9a0e40cd131e9829ff36fed16b464170d27c0b365a3f549df57282 -Test_VM_report3_2_default 2231bc352cf431e72a84abad2261969bd5b0ee3d9051bb7a53b69fd8ea05f951 -Test_VM_report3_3_default 9416187eb0600ed247795837ca820bccaffb39841bd9d2ff625816bfbba35d6d -Test_VM_report3_4_default 11661eb00ea37c68e3483a8e048f922f73070c6da8219247663e3d6471c5c0cc -Test_VM_report3_6_default 1d7be98a8b07f803e80168247459e620ce4b91df634ad896e878d21a3ed757c0 -Test_VM_report3_int_overflow_1_default a0c2414ca2c9672d54409ee375a6aad6e2233306eaa3dfd33a82de3c90bba3ba -Test_VM_report3_int_overflow_2_default 01cd461802e532a6830705ad50eaa1760278737ff7beeb654e3c59ceb4aa8e2e -Test_VM_report3_loop_1_default b28b35d057a1b4fa2282d6f422ecd822b18cc4344733d923ef7b002f64bc4d72 -Test_VM_report3_loop_2_default 9f8236535902b04e403d412fcf1f79e64d0f2eb25b3cc014b7d61b2d7a34b9ef -Test_VM_report3_loop_3_default 7ee05ea553c48a2476035817b9d860f614a355927c9e011b2f824dc6e5f7b0cf -Test_VM_report3_loop_4_default 4b6c2f65fda3c9e9c6660b6cbbcb1b2103c5b52870cb5daa8876bbed0ca9bbc9 -Test_VM_report3_loop_5_default 0d5d504884172ef8513757d7f6b2a3870dbd28efd5960857441c032e1c67d836 -Test_VM_report3_loop_6_default 5c35b92144debdb61b2020d690669bffbdd96f75ecde827fd0c75c05da22b5a0 -Test_VM_report3_qnot_default dc280444c7e3886cc3412f96f44c803c45287a07fcb9c638643e21bcdfe3905d -Test_VM_simple_default f6733549069427c2beb1a85ee25635540e27aa68cb8ad101d8435e19afeae862 -Test_VM_unhandled_exception_1_default 0abe2740dd3b6a6b91eb67fee573f638086fecc72653d2d81c956782186b5d78 -Test_VM_unhandled_exception_2_default 5ca67db5a0e957cc106bb47b744177ca959632a352f3629df376c34cbf03d51b -Test_VM_unhandled_exception_3_default 
b354e897e35a1177fd66d2c6ad7d77ae33a4e18f8678a518d79fea1388853307 -Test_VM_unhandled_exception_4_default 412cbfe13745fde55cdcc5e41d7b15ba4d09f0e723f8e4421ae0b7066ca07b8f -Test_VM_unhandled_exception_5_default d760e540cd9c200c207f71c540cdaf06d11c96e32ec19860b9c1046cb1e38855 +Test_VM_bug_div_short_any_default 49c9588b2b25b08979016f8b7ca42ae9fa4904a1dc6a2093a7dae6dce0cdf42f +Test_VM_bug_exec_dict_getnear_default 0b0cb6c1fef773f8b5a4aab8d575ba941f3b85dd449f85051296954028e59781 +Test_VM_bug_stack_overflow_default 31950eb2ed62bd1ce1c18e0031a81390ff3a3feee61ff23a09181995917137d0 +Test_VM_infinity_loop_1_default 6b8cc0ff85efa6882ffdf6e9e4333967976a29c4ce32a25b42c4c53370ad3024 +Test_VM_infinity_loop_2_default 4be08957dc86dfde3dfadd8c2f961ef2f1fa839788bbf7affea754115cee9e18 +Test_VM_memory_leak_default da588f89f3bc3ef7496bbab61e2d993f0c84bba80bb28d9c20c6eac0f7f57dd3 +Test_VM_memory_leak_new_default d25e8602c88c454ded6271d0f7afbc556820cc9942c56de9e0bd95b329f8783e +Test_VM_memory_leak_old_default 563f5a02130f231823099985c77d09913db07d2d8782edf431822f6afe4911b8 +Test_VM_oom_1_default 354934173c82e4f7bca5063846abd35cb47c4fcf1c3ba8c2fd04a4b186fcbf18 +Test_VM_report3_1_default 26bb43b5100e94791911a66226ec6545422749e0ea9e6279983b00ef4b506601 +Test_VM_report3_2_default 07a84726217f362fd71b3ceab96112ffc7aa40ed44636cf0128205d85798c66c +Test_VM_report3_3_default 0d9a92491c88ada92283691debd123724db2b7c1bd345200a53c057032e9cc07 +Test_VM_report3_4_default eb23c8e1219aed91b1b4f519efcac87018a5cf8e0ce473cfa641f8221f3c5d20 +Test_VM_report3_6_default 769ce8f9bb6fb9b8619afdb8e9d621b6199466f07c37eeea8edf3c21bf05a101 +Test_VM_report3_int_overflow_1_default 7aaf32ec7ace54b93d6b55f3ac9642572f348ebd64412afdda24849f8e4eb1dc +Test_VM_report3_int_overflow_2_default 572d197681654c94951280448ea3cf448613391633383c2424d719b03b6ec939 +Test_VM_report3_loop_1_default c9b00b32a024c65e0a8019c86e94ee365a823ff26e2420e1797902841abab57c +Test_VM_report3_loop_2_default 3654949987ddb44d8e11e84fe907d43707eaed910b9d0ad15dd68b531df1444a +Test_VM_report3_loop_3_default c1fc7e0d160b334fe8a4735a2a9d36c3b10530edaad5c1859df88382ff82a2d9 +Test_VM_report3_loop_4_default 5ad7cc51a6e553ee3d4a427229908a51692e117624838190311c7023df7a5e5b +Test_VM_report3_loop_5_default 068f81caecc344132a601259d9f73eea7089b1399793661ba1954483e0d5682c +Test_VM_report3_loop_6_default bf2e45709fceeed0192ec34af618cba3b85b90f71071e018afba686167618a90 +Test_VM_report3_qnot_default 7fcbda7e3fc4853a36e6b02e9d346f039690b1879d40850f561ea4123452d3ec +Test_VM_simple_default c96d70ea853828c89cd38fcf22543289335f3086a53301a1d0f186753ba9975b +Test_VM_unhandled_exception_1_default 80fe0e4c2ae19ae73e67e4355548d0afa59ea286be2d75a91db4529618dba008 +Test_VM_unhandled_exception_2_default 1362ba3a6ddbf5a30aba07ad58e8c24b0f85bdc53525e3eaa27af7248c62525a +Test_VM_unhandled_exception_3_default e381ce751cbd0e2994d7f60df7746b9ed7783198cfbcb31dccf02fafe68b6733 +Test_VM_unhandled_exception_4_default 51dd501ec0514f3b388145761b252f09d6ef3e831ea450605ae30511688dd708 +Test_VM_unhandled_exception_5_default 8231cfe1fb6ce6107b592f2c8f6a4eae0d123fc399163c81e8e0d5228b68bc91 Test_base64_main_default e90d541bd810871c4a81e162f1fffb555024b72807cb895414d16bc11494b789 Test_bigexp_main_default 45a1f51fb2abcc1ebf8569e1a57bebee04c334a15e03535ff5869bc9a9db8956 Test_bits256_scan_main_default 3ec7434e1cabc8e08eb2e79064e67747ffbfed177473c7873b88c144a7ed6f42 diff --git a/test/test-adnl.cpp b/test/test-adnl.cpp index d9ae4abe..85e965a4 100644 --- a/test/test-adnl.cpp +++ b/test/test-adnl.cpp @@ -52,7 +52,7 @@ int main() { 
td::to_integer_safe("0").ensure(); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-adnl"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-catchain.cpp b/test/test-catchain.cpp index 53b32924..149ea3e3 100644 --- a/test/test-catchain.cpp +++ b/test/test-catchain.cpp @@ -219,7 +219,7 @@ int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_INFO); td::set_default_failure_signal_handler().ensure(); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-catchain"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); @@ -274,8 +274,6 @@ int main(int argc, char *argv[]) { } }); - auto t = td::Timestamp::in(1.0); - ton::catchain::CatChainSessionId unique_id; td::Random::secure_bytes(unique_id.as_slice()); @@ -287,7 +285,7 @@ int main(int argc, char *argv[]) { } }); - t = td::Timestamp::in(10.0); + auto t = td::Timestamp::in(10.0); while (scheduler.run(1)) { if (t.is_in_past()) { break; @@ -300,7 +298,7 @@ int main(int argc, char *argv[]) { scheduler.run_in_context([&] { td::actor::send_closure(inst[0], &CatChainInst::create_fork); }); - t = td::Timestamp::in(10.0); + t = td::Timestamp::in(1.0); while (scheduler.run(1)) { if (t.is_in_past()) { break; diff --git a/test/test-dht.cpp b/test/test-dht.cpp index 2391fd9c..8d814f6e 100644 --- a/test/test-dht.cpp +++ b/test/test-dht.cpp @@ -41,7 +41,7 @@ int main() { SET_VERBOSITY_LEVEL(verbosity_INFO); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-dht"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-ext-client.cpp b/test/test-ext-client.cpp deleted file mode 100644 index a1187d78..00000000 --- a/test/test-ext-client.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. 
- - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "adnl/adnl-ext-client.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class TestNode : public td::actor::Actor { - private: - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - td::actor::ActorOwn client_; - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::AdnlExtClient::Callback { - public: - void on_ready() override { - td::actor::send_closure(id_, &TestNode::conn_ready); - } - void on_stop_ready() override { - td::actor::send_closure(id_, &TestNode::conn_closed); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - bool ready_ = false; - std::string db_root_; - - public: - void conn_ready() { - LOG(ERROR) << "conn ready"; - ready_ = true; - } - void conn_closed() { - ready_ = false; - } - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void set_db_root(std::string db_root) { - db_root_ = db_root; - } - void start_up() override { - } - void alarm() override { - if (ready_ && !client_.empty()) { - LOG(ERROR) << "sending query"; - auto P = td::PromiseCreator::lambda([](td::Result R) { - if (R.is_error()) { - LOG(ERROR) << "failed query: " << R.move_as_error(); - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "failed to pasrse answer: " << F.move_as_error(); - return; - } - auto obj = F.move_as_ok(); - LOG(ERROR) << "got answer: " << ton::ton_api::to_string(obj); - }); - td::BufferSlice b = ton::serialize_tl_object(ton::create_tl_object(), true); - td::actor::send_closure(client_, &ton::adnl::AdnlExtClient::send_query, "query", std::move(b), - td::Timestamp::in(10.0), std::move(P)); - } - alarm_timestamp() = td::Timestamp::in(2.0); - } - TestNode() { - } - void run() { - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - CHECK(gc.liteclients_.size() > 0); - auto &cli = gc.liteclients_[0]; - td::IPAddress addr; - addr.init_host_port(td::IPAddress::ipv4_to_str(cli->ip_), cli->port_).ensure(); - - client_ = ton::adnl::AdnlExtClient::create(ton::adnl::AdnlNodeIdFull::create(cli->id_).move_as_ok(), addr, - make_callback()); - alarm_timestamp() = td::Timestamp::in(2.0); - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 
res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_DEBUG); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('D', "db", "root for dbs", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_db_root, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-ext-server.cpp b/test/test-ext-server.cpp deleted file mode 100644 index b4b78728..00000000 --- a/test/test-ext-server.cpp +++ /dev/null @@ -1,221 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. 
- - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class TestNode : public td::actor::Actor { - private: - td::actor::ActorOwn keyring_; - td::actor::ActorOwn adnl_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::Adnl::Callback { - public: - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, - td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::adnl_receive_message, src, dst, std::move(data)); - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::adnl_receive_query, src, dst, std::move(data), std::move(promise)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - std::string db_root_; - - public: - void adnl_receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data) { - LOG(ERROR) << "ADNL MESSAGE FROM " << src << ": size=" << data.size() << "\n"; - } - - void adnl_receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) { - LOG(ERROR) << "ADNL QUERY FROM " << src << ": size=" << data.size() << "\n"; - promise.set_value(ton::serialize_tl_object(ton::create_tl_object("xxx"), true)); - } - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void set_db_root(std::string db_root) { - db_root_ = db_root; - } - void start_up() override { - } - void alarm() override { - } - TestNode() { - } - void run() { - keyring_ = ton::keyring::Keyring::create(db_root_ + "/keyring/"); - adnl_ = ton::adnl::Adnl::create(db_root_, keyring_.get()); - - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - //td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - 
std::move(gc.adnl_->static_nodes_)); - } - for (auto &x : lc.liteservers_) { - auto pk = ton::PrivateKey{x->id_}; - auto pub_k = ton::adnl::AdnlNodeIdFull{pk.compute_public_key()}; - auto id = pub_k.compute_short_id(); - - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, pub_k, ton::adnl::AdnlAddressList{}); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, id, - ton::adnl::Adnl::int_to_bytestring(ton::ton_api::getTestObject::ID), make_callback()); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ext_local_id, id); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ext_tcp_port, static_cast(x->port_)); - } - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_DEBUG); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('D', "db", "root for dbs", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_db_root, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-node.cpp b/test/test-node.cpp deleted file mode 100644 index d771d3ae..00000000 --- a/test/test-node.cpp +++ /dev/null @@ -1,376 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. 
- - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "catchain/catchain.h" - -#include "crypto/common/refvector.hpp" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class TestNode : public td::actor::Actor { - private: - std::vector ping_ids_; - td::Timestamp next_dht_dump_; - - td::actor::ActorOwn adnl_; - std::vector> dht_nodes_; - td::actor::ActorOwn overlay_manager_; - std::vector> overlays_; - std::vector> catchains_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - td::int32 broadcast_size_ = 100; - - void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) { - LOG(ERROR) << "MESSAGE FROM " << src << " to " << dst << " of size " << std::to_string(data.size()) << "\n"; - } - - void receive_broadcast(td::UInt256 overlay_id, td::BufferSlice data) { - LOG(ERROR) << "BROADCAST IN " << overlay_id << " hash=" << td::sha256(data.as_slice()) << "\n"; - } - - void receive_query(td::UInt256 src, td::UInt256 dst, td::BufferSlice data, td::Promise promise) { - auto Q = ton::fetch_tl_object(std::move(data), true); - CHECK(Q.is_ok()); - auto R = Q.move_as_ok(); - LOG(ERROR) << "QUERY " - << " FROM " << src << " to " << dst << ": " << ton::ton_api::to_string(R) << "\n"; - promise.set_value(serialize_tl_object(ton::create_tl_object(), true)); - } - - void catchain_new_block(td::UInt256 src, td::uint64 height, td::BufferSlice data) { - LOG(ERROR) << "CATCHAIN BLOCK: " << src << "@" << height << ": " << td::sha256_uint256(data.as_slice()) << "\n"; - } - void catchain_bad_block(td::UInt256 src) { - LOG(ERROR) << "CATCHAIN BAD BLOCK\n"; - } - void catchain_broadcast(td::BufferSlice data) { - LOG(ERROR) << "CATCHAIN BROADCAST " << td::sha256_uint256(data.as_slice()) << "\n"; - } - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::Adnl::Callback { - public: - void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) override 
{ - td::actor::send_closure(id_, &TestNode::receive_message, src, dst, std::move(data)); - } - void receive_query(td::UInt256 src, td::UInt256 dst, td::BufferSlice data, - td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::receive_query, src, dst, std::move(data), std::move(promise)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - std::unique_ptr make_catchain_callback() { - class Callback : public ton::CatChainActor::Callback { - public: - void new_block(td::UInt256 src, td::uint64 height, td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::catchain_new_block, src, height, std::move(data)); - } - void bad_block(td::UInt256 src) override { - td::actor::send_closure(id_, &TestNode::catchain_bad_block, src); - } - void broadcast(td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::catchain_broadcast, std::move(data)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - std::unique_ptr make_overlay_callback() { - class Callback : public ton::overlay::Overlays::Callback { - public: - void receive_message(td::UInt256 src, td::UInt256 overlay_id, td::BufferSlice data) override { - } - void receive_query(td::UInt256 src, td::uint64 query_id, td::UInt256 overlay_id, td::BufferSlice data) override { - } - - void receive_broadcast(td::UInt256 overlay_id, td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::receive_broadcast, overlay_id, std::move(data)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - public: - void set_broadcast_size(td::int32 size) { - broadcast_size_ = size; - } - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void start_up() override { - alarm_timestamp() = td::Timestamp::in(1); - } - void alarm() override { - /*if (overlays_.size() > 0 && broadcast_size_ > 0) { - td::BufferSlice s(broadcast_size_); - td::Random::secure_bytes(s.as_slice()); - - td::actor::send_closure(overlay_manager_, &ton::overlay::OverlayManager::send_broadcast_fer, overlays_[0].first, - overlays_[0].second, ton::create_tl_object(s.as_slice().str())); - }*/ - for (auto &chain : catchains_) { - td::BufferSlice s(broadcast_size_); - td::Random::secure_bytes(s.as_slice()); - - td::actor::send_closure(chain, &ton::CatChainActor::add_event, std::move(s)); - } - alarm_timestamp() = td::Timestamp::in(1.0); - if (next_dht_dump_.is_in_past()) { - /*for (auto &node : dht_nodes_) { - char b[10240]; - td::StringBuilder sb({b, 10000}); - node->get_actor_unsafe().dump(sb); - LOG(DEBUG) << sb.as_cslice().c_str(); - }*/ - next_dht_dump_ = td::Timestamp::in(60.0); - } - } - TestNode() { - adnl_ = ton::adnl::Adnl::create("/var/ton-work/db.adnl"); - } - void run() { - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - 
td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - /*if (!lc.net_) { - LOG(FATAL) << "local config does not contain NET section"; - }*/ - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - - for (auto &it : lc.dht_) { - if (it->get_id() == ton::ton_api::dht_config_local::ID) { - auto R = ton::dht::Dht::create_from_json( - ton::clone_tl_object(gc.dht_), ton::move_tl_object_as(it), adnl_.get()); - if (R.is_error()) { - LOG(FATAL) << "fail creating dht node: " << R.move_as_error(); - } - dht_nodes_.push_back(R.move_as_ok()); - } else { - auto I = ton::move_tl_object_as(it); - for (int i = 0; i < I->cnt_; i++) { - auto R = ton::dht::Dht::create_random(ton::clone_tl_object(gc.dht_), ton::clone_tl_object(I->addr_list_), - adnl_.get()); - if (R.is_error()) { - LOG(FATAL) << "fail creating dht node: " << R.move_as_error(); - } - dht_nodes_.push_back(R.move_as_ok()); - } - } - } - - CHECK(dht_nodes_.size() > 0); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_dht_node, dht_nodes_[0].get()); - //td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::register_dht_node, dht_nodes_[0].get()); - - overlay_manager_ = ton::overlay::Overlays::create(adnl_.get(), dht_nodes_[0].get()); - - for (auto &it : lc.public_overlays_) { - if (it->get_id() == ton::ton_api::overlay_config_local::ID) { - auto X = ton::move_tl_object_as(it); - auto id = ton::create_tl_object(X->name_.clone()); - auto Id = ton::move_tl_object_as(id); - auto sid = ton::adnl_short_id(Id); - overlays_.emplace_back(X->id_->id_, sid); - td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::create_public_overlay, X->id_->id_, - std::move(Id), make_overlay_callback()); - } else { - auto X = ton::move_tl_object_as(it); - for (int i = 0; i < X->cnt_; i++) { - auto pk = ton::adnl_generate_random_pk(); - auto local_id = ton::adnl_short_id(ton::get_public_key(pk)); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, std::move(pk), ton::clone_tl_object(X->addr_list_)); - - auto id = ton::create_tl_object(X->name_.clone()); - auto Id = ton::move_tl_object_as(id); - auto sid = ton::adnl_short_id(Id); - overlays_.emplace_back(local_id, sid); - td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::create_public_overlay, local_id, - std::move(Id), make_overlay_callback()); - } - } - } - - //auto C = ton::CatChainActor::create(nullptr, adnl_.get(), overlay_manager_.get(), - // std::vector>()); - - for (auto &it : lc.catchains_) { - auto tag = it->tag_; - for (auto &V : gc.catchains_) { - if (V->tag_ == tag) { - auto v = std::move(clone_tl_object(V)->nodes_); - auto C = ton::CatChainActor::create(make_catchain_callback(), adnl_.get(), overlay_manager_.get(), - std::move(v), it->id_->id_, tag); - catchains_.push_back(std::move(C)); - } - } - } - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - 
-int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_DEBUG); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb({b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('s', "broadcast-size", "size of broadcast", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_broadcast_size, std::atoi(fname.str().c_str())); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-rldp.cpp b/test/test-rldp.cpp index 11344c55..b07f5f7d 100644 --- a/test/test-rldp.cpp +++ b/test/test-rldp.cpp @@ -40,7 +40,7 @@ int main() { SET_VERBOSITY_LEVEL(verbosity_INFO); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-rldp"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-rldp2.cpp b/test/test-rldp2.cpp index 5367ffd3..646b27d5 100644 --- a/test/test-rldp2.cpp +++ b/test/test-rldp2.cpp @@ -40,7 +40,7 @@ int main() { SET_VERBOSITY_LEVEL(verbosity_INFO); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-rldp2"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-ton-collator.cpp b/test/test-ton-collator.cpp index 9ed5c781..b76dd5eb 100644 --- a/test/test-ton-collator.cpp +++ b/test/test-ton-collator.cpp @@ -50,7 +50,7 @@ #include "validator/fabric.h" #include "validator/impl/collator.h" -#include "crypto/vm/cp0.h" +#include "crypto/vm/vm.h" #include "crypto/block/block-db.h" #include "common/errorlog.h" @@ -408,7 +408,7 @@ int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_INFO); td::set_default_failure_signal_handler().ensure(); - CHECK(vm::init_op_cp0()); + vm::init_vm().ensure(); td::actor::ActorOwn x; diff --git a/test/test-validator-session-state.cpp b/test/test-validator-session-state.cpp index 819c1cc2..5ed08add 100644 --- a/test/test-validator-session-state.cpp +++ b/test/test-validator-session-state.cpp @@ -48,6 +48,7 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { return 0; 
} void *alloc(size_t size, size_t align, bool temp) override { + size = (size + 15) / 16 * 16; td::uint32 idx = temp ? 1 : 0; auto s = pdata_cur_[idx].fetch_add(size); CHECK(s + size <= pdata_size_[idx]); diff --git a/test/test-validator-session.cpp b/test/test-validator-session.cpp deleted file mode 100644 index e986795a..00000000 --- a/test/test-validator-session.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "rldp/rldp.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "td/utils/overloaded.h" -#include "catchain/catchain.h" -#include "validator-session/validator-session.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -class TestNode : public td::actor::Actor { - private: - td::actor::ActorOwn keyring_; - td::actor::ActorOwn adnl_; - td::actor::ActorOwn rldp_; - std::vector> dht_nodes_; - td::actor::ActorOwn overlay_manager_; - std::vector> validator_sessions_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - std::unique_ptr make_vs_callback() { - class Callback : public ton::validatorsession::ValidatorSession::Callback { - public: - void on_candidate(td::uint32 round, ton::PublicKeyHash source, - ton::validatorsession::ValidatorSessionRootHash root_hash, td::BufferSlice data, - td::BufferSlice extra, - td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::on_candidate, round, source, root_hash, std::move(data), - std::move(extra), std::move(promise)); - } - void on_generate_slot(td::uint32 round, td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::on_generate_slot, round, std::move(promise)); - } - void on_block_committed(td::uint32 round, ton::PublicKeyHash src, - ton::validatorsession::ValidatorSessionRootHash root_hash, - ton::validatorsession::ValidatorSessionFileHash file_hash, td::BufferSlice data, - std::vector> signatures, - 
ton::validatorsession::ValidatorSessionStats stats) override { - td::actor::send_closure(id_, &TestNode::on_block_committed, round, root_hash, std::move(data), - std::move(signatures)); - } - /*void on_missing_block_committed( - td::uint32 round, ton::validatorsession::ValidatorSessionRootHash root_hash, ton::validatorsession::ValidatorSessionFileHash file_hash, - td::BufferSlice data, std::vector> signatures) override { - td::actor::send_closure(id_, &TestNode::on_block_committed_abscent, round, root_hash, file_hash, - std::move(data), std::move(signatures)); - }*/ - void on_block_skipped(td::uint32 round) override { - td::actor::send_closure(id_, &TestNode::on_block_skipped, round); - } - void get_approved_candidate(ton::validatorsession::ValidatorSessionRootHash root_hash, - ton::validatorsession::ValidatorSessionFileHash file_hash, - ton::validatorsession::ValidatorSessionFileHash collated_data_file_hash, - td::Promise promise) override { - UNREACHABLE(); - } - - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - td::uint64 height_ = 0; - - public: - void on_candidate(td::uint32 round, ton::PublicKeyHash source, - ton::validatorsession::ValidatorSessionRootHash root_hash, td::BufferSlice data, - td::BufferSlice collated, - td::Promise promise) { - auto sh = sha256_bits256(data.as_slice()); - auto B = ton::fetch_tl_object(std::move(data), true); - if (B.is_error()) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{B.move_as_error().to_string(), td::BufferSlice()}); - return; - } - if (collated.size() != 32) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad collated data length", td::BufferSlice()}); - return; - } - td::Bits256 x; - x.as_slice().copy_from(collated.as_slice().truncate(32)); - if (x != sh) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad block hash", td::BufferSlice()}); - return; - } - auto block = B.move_as_ok(); - if (block->root_hash_ != root_hash) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad root hash", td::BufferSlice()}); - return; - } - if (block->root_hash_ != sha256_bits256(block->data_.as_slice())) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad root hash (2)", td::BufferSlice()}); - return; - } - if (block->height_ != static_cast(height_) + 1) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad root height", td::BufferSlice()}); - return; - } - promise.set_result(ton::validatorsession::ValidatorSession::CandidateDecision{0}); - } - void on_generate_slot(td::uint32 round, td::Promise promise) { - auto data = td::BufferSlice{10000}; - td::Random::secure_bytes(data.as_slice()); - auto root_hash = sha256_bits256(data.as_slice()); - auto block = - ton::create_tl_object(root_hash, height_ + 1, std::move(data)); - - auto B = ton::serialize_tl_object(block, true); - auto hash = sha256_bits256(B.as_slice()); - auto collated = td::BufferSlice{32}; - collated.as_slice().copy_from(as_slice(hash)); - - /*BlockId id; - BlockStatus status; - RootHash root_hash; - FileHash file_hash; - FileHash collated_file_hash; - td::BufferSlice data; - td::BufferSlice collated_data;*/ - auto collated_file_hash = td::sha256_bits256(collated.as_slice()); - ton::BlockCandidate candidate{ton::BlockIdExt{ton::BlockId{0, 0, 0}, root_hash, 
td::sha256_bits256(B.as_slice())}, - collated_file_hash, std::move(B), std::move(collated)}; - promise.set_result(std::move(candidate)); - } - void on_block_committed(td::uint32 round, ton::validatorsession::ValidatorSessionRootHash root_hash, - td::BufferSlice data, - std::vector> signatures) { - LOG(ERROR) << "COMITTED BLOCK: ROUND=" << round << " ROOT_HASH=" << root_hash - << " DATA_HASH=" << sha256_bits256(data.as_slice()) << " SIGNED BY " << signatures.size(); - } - void on_block_skipped(td::uint32 round) { - LOG(ERROR) << "SKIPPED ROUND=" << round; - } - - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void start_up() override { - } - void alarm() override { - } - TestNode() { - } - void run() { - keyring_ = ton::keyring::Keyring::create("/var/ton-work/db.keyring"); - adnl_ = ton::adnl::Adnl::create("/var/ton-work/db.adnl", keyring_.get()); - rldp_ = ton::rldp::Rldp::create(adnl_.get()); - - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - /*if (!lc.net_) { - LOG(FATAL) << "local config does not contain NET section"; - }*/ - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - //td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - auto dhtR = ton::dht::Dht::create_global_config(std::move(gc.dht_)); - if (dhtR.is_error()) { - LOG(FATAL) << "bad dht config: " << dhtR.move_as_error(); - } - auto dht = dhtR.move_as_ok(); - - for (auto &it : lc.dht_) { - std::vector adnl_ids; - ton::ton_api::downcast_call( - *it.get(), td::overloaded( - [&](ton::ton_api::dht_config_local &obj) { - adnl_ids.push_back(ton::adnl::AdnlNodeIdShort{obj.id_->id_}); - }, - [&](ton::ton_api::dht_config_random_local &obj) { - auto addrR = ton::adnl::AdnlAddressList::create(std::move(obj.addr_list_)); - addrR.ensure(); - auto addr = addrR.move_as_ok(); - for (td::int32 i = 0; i < obj.cnt_; i++) { - auto pk = ton::PrivateKey{ton::privkeys::Ed25519::random()}; - auto pub = pk.compute_public_key(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{pub}, - addr); - auto adnl_id = ton::adnl::AdnlNodeIdShort{pub.compute_short_id()}; - adnl_ids.push_back(adnl_id); - } - })); - for (auto &id : adnl_ids) { - auto R = ton::dht::Dht::create(id, "/var/ton-work/db/", dht, keyring_.get(), adnl_.get()); - R.ensure(); - dht_nodes_.push_back(R.move_as_ok()); - } - } - - CHECK(dht_nodes_.size() > 0); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_dht_node, dht_nodes_[0].get()); - //td::actor::send_closure(overlay_manager_, 
&ton::overlay::Overlays::register_dht_node, dht_nodes_[0].get()); - - overlay_manager_ = - ton::overlay::Overlays::create("/var/ton-work/db.overlays", keyring_.get(), adnl_.get(), dht_nodes_[0].get()); - - //auto C = ton::CatChainActor::create(nullptr, adnl_.get(), overlay_manager_.get(), - // std::vector>()); - - for (auto &it : lc.catchains_) { - auto tag = it->tag_; - for (auto &V : gc.catchains_) { - if (V->tag_ == tag) { - auto v = std::move(clone_tl_object(V)->nodes_); - - std::vector w; - w.resize(v.size()); - for (size_t i = 0; i < w.size(); i++) { - w[i].pub_key = ton::PublicKey{v[i]}; - w[i].adnl_id = ton::adnl::AdnlNodeIdShort{w[i].pub_key.compute_short_id()}; - w[i].weight = 1; - } - - auto C = ton::validatorsession::ValidatorSession::create( - tag, ton::PublicKeyHash{it->id_->id_}, std::move(w), make_vs_callback(), keyring_.get(), adnl_.get(), - rldp_.get(), overlay_manager_.get(), "/var/ton-work/db/"); - td::actor::send_closure(C, &ton::validatorsession::ValidatorSession::start); - validator_sessions_.emplace_back(std::move(C)); - } - } - } - } -}; - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_INFO); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({7}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-validator.cpp b/test/test-validator.cpp deleted file mode 100644 index 7bc018f7..00000000 --- a/test/test-validator.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "rldp/rldp.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "auto/tl/ton_api.hpp" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/TsFileLog.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "catchain/catchain.h" -#include "validator-session/validator-session.h" -#include "ton-node/ton-node.h" -#include "validator/manager.h" -#include "td/utils/filesystem.h" -#include "td/utils/ThreadSafeCounter.h" -#include "td/utils/port/path.h" -#include "crypto/vm/cp0.h" -#include "td/utils/overloaded.h" - -#include "memprof/memprof.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include -#include - -class TestNode : public td::actor::Actor { - private: - td::actor::ActorOwn keyring_; - td::actor::ActorOwn adnl_; - td::actor::ActorOwn rldp_; - std::vector> dht_nodes_; - td::actor::ActorOwn overlay_manager_; - td::actor::ActorOwn validator_manager_; - td::actor::ActorOwn ton_node_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - std::string db_root_ = "/var/ton-work/db/"; - std::string zero_state_ = ""; - - public: - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void set_db_root(std::string db_root) { - db_root_ = db_root; - } - void set_zero_state(std::string zero_state) { - zero_state_ = zero_state; - } - void start_up() override { - } - void alarm() override { - } - TestNode() { - } - void run() { - td::mkdir(db_root_).ensure(); - - keyring_ = ton::keyring::Keyring::create(db_root_ + "/keyring"); - adnl_ = ton::adnl::Adnl::create(db_root_, keyring_.get()); - rldp_ = ton::rldp::Rldp::create(adnl_.get()); - - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - /*if (!lc.net_) { - LOG(FATAL) << "local config does not contain NET section"; - }*/ - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - 
//td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - for (auto &local_id : lc.local_ids_) { - auto pk = ton::PrivateKey{local_id->id_}; - auto pub = pk.compute_public_key(); - auto addr_list = ton::adnl::AdnlAddressList::create(local_id->addr_list_); - addr_list.ensure(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{pub}, addr_list.move_as_ok()); - } - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - - auto dhtR = ton::dht::Dht::create_global_config(std::move(gc.dht_)); - if (dhtR.is_error()) { - LOG(FATAL) << "bad dht config: " << dhtR.move_as_error(); - } - auto dht = dhtR.move_as_ok(); - - for (auto &it : lc.dht_) { - std::vector adnl_ids; - ton::ton_api::downcast_call( - *it.get(), td::overloaded( - [&](ton::ton_api::dht_config_local &obj) { - adnl_ids.push_back(ton::adnl::AdnlNodeIdShort{obj.id_->id_}); - }, - [&](ton::ton_api::dht_config_random_local &obj) { - auto addrR = ton::adnl::AdnlAddressList::create(std::move(obj.addr_list_)); - addrR.ensure(); - auto addr = addrR.move_as_ok(); - for (td::int32 i = 0; i < obj.cnt_; i++) { - auto pk = ton::PrivateKey{ton::privkeys::Ed25519::random()}; - auto pub = pk.compute_public_key(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{pub}, - addr); - auto adnl_id = ton::adnl::AdnlNodeIdShort{pub.compute_short_id()}; - adnl_ids.push_back(adnl_id); - } - })); - for (auto &id : adnl_ids) { - auto R = ton::dht::Dht::create(id, db_root_, dht, keyring_.get(), adnl_.get()); - R.ensure(); - dht_nodes_.push_back(R.move_as_ok()); - } - } - - CHECK(dht_nodes_.size() > 0); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_dht_node, dht_nodes_[0].get()); - overlay_manager_ = ton::overlay::Overlays::create(db_root_, keyring_.get(), adnl_.get(), dht_nodes_[0].get()); - - CHECK(lc.validators_.size() <= 1); - CHECK(gc.validators_.size() <= 1); - - bool is_validator = false; - if (lc.validators_.size() == 1) { - CHECK(gc.validators_.size() == 1); - auto zero_state_id = - ton::BlockIdExt{ton::masterchainId, ton::shardIdAll, 0, gc.validators_[0]->zero_state_root_hash_, - gc.validators_[0]->zero_state_file_hash_}; - ton::PublicKeyHash id; - ton::adnl::AdnlNodeIdShort adnl_id; - ton::ton_api::downcast_call(*lc.validators_[0].get(), - td::overloaded( - [&](ton::ton_api::validator_config_local &cfg) { - id = ton::PublicKeyHash{cfg.id_->id_}; - adnl_id = ton::adnl::AdnlNodeIdShort{id}; - is_validator = true; - }, - [&](ton::ton_api::validator_config_random_local &cfg) { - auto privkey = ton::PrivateKey{ton::privkeys::Ed25519::random()}; - auto pubkey = ton::adnl::AdnlNodeIdFull{privkey.compute_public_key()}; - auto addrR = ton::adnl::AdnlAddressList::create(std::move(cfg.addr_list_)); - addrR.ensure(); - auto addr = addrR.move_as_ok(); - id = privkey.compute_short_id(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, - std::move(privkey), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, pubkey, addr); - adnl_id = ton::adnl::AdnlNodeIdShort{id}; - })); - - auto opts = ton::ValidatorManagerOptions::create( - zero_state_id, 
std::vector{ton::ShardIdFull{ton::basechainId, ton::shardIdAll}}); - CHECK(!opts.is_null()); - opts.write().set_allow_blockchain_init(is_validator); - validator_manager_ = - ton::ValidatorManagerFactory::create(is_validator ? id : ton::PublicKeyHash::zero(), opts, db_root_, - keyring_.get(), adnl_.get(), rldp_.get(), overlay_manager_.get()); - ton_node_ = - ton::TonNodeManager::create(adnl_id, gc.validators_[0]->zero_state_file_hash_, adnl_.get(), rldp_.get(), - dht_nodes_[0].get(), overlay_manager_.get(), validator_manager_.get(), db_root_); - - for (auto &x : lc.liteservers_) { - auto pk = ton::PrivateKey{x->id_}; - auto pub_k = ton::adnl::AdnlNodeIdFull{pk.compute_public_key()}; - auto id = pub_k.compute_short_id(); - - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, pub_k, ton::adnl::AdnlAddressList{}); - td::actor::send_closure(validator_manager_, &ton::ValidatorManager::add_ext_server_id, id); - td::actor::send_closure(validator_manager_, &ton::ValidatorManager::add_ext_server_port, - static_cast(x->port_)); - } - } - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -std::atomic need_stats_flag{false}; -void need_stats(int sig) { - need_stats_flag.store(true); -} -void dump_memory_stats() { - if (!is_memprof_on()) { - return; - } - LOG(WARNING) << "memory_dump"; - std::vector v; - dump_alloc([&](const AllocInfo &info) { v.push_back(info); }); - std::sort(v.begin(), v.end(), [](const AllocInfo &a, const AllocInfo &b) { return a.size > b.size; }); - size_t total_size = 0; - size_t other_size = 0; - int cnt = 0; - for (auto &info : v) { - if (cnt++ < 50) { - LOG(WARNING) << td::format::as_size(info.size) << td::format::as_array(info.backtrace); - } else { - other_size += info.size; - } - total_size += info.size; - } - LOG(WARNING) << td::tag("other", td::format::as_size(other_size)); - LOG(WARNING) << td::tag("total", td::format::as_size(total_size)); - LOG(WARNING) << td::tag("total traces", get_ht_size()); - LOG(WARNING) << td::tag("fast_backtrace_success_rate", get_fast_backtrace_success_rate()); -} -void dump_stats() { - dump_memory_stats(); - LOG(WARNING) << td::NamedThreadSafeCounter::get_default(); -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_INFO); - - td::set_default_failure_signal_handler().ensure(); - - CHECK(vm::init_op_cp0()); - - td::actor::ActorOwn x; - td::unique_ptr logger_; - SCOPE_EXIT { - td::log_interface = td::default_log_interface; - }; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) { - int v = VERBOSITY_NAME(FATAL) + (td::to_integer(arg)); - SET_VERBOSITY_LEVEL(v); - return td::Status::OK(); - }); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", 
[&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('i', "id", "id of instance", [&](td::Slice fname) { return td::Status::OK(); }); - p.add_option('D', "db", "root for dbs", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_db_root, fname.str()); - return td::Status::OK(); - }); - p.add_option('z', "zero-state", "file with serialized zero state", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_zero_state, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - logger_ = td::TsFileLog::create(fname.str()).move_as_ok(); - td::log_interface = logger_.get(); - return td::Status::OK(); - }); -#endif - td::set_runtime_signal_handler(1, need_stats).ensure(); - - td::actor::Scheduler scheduler({7}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - while (scheduler.run(1)) { - if (need_stats_flag.exchange(false)) { - dump_stats(); - } - } - - return 0; -} diff --git a/third-party/blst b/third-party/blst index e9dfc5ee..3dd0f804 160000 --- a/third-party/blst +++ b/third-party/blst @@ -1 +1 @@ -Subproject commit e9dfc5ee724b5b25d50a3b6226bee8c2c9d5e65d +Subproject commit 3dd0f804b1819e5d03fb22ca2e6fac105932043a diff --git a/third-party/rocksdb b/third-party/rocksdb index fcf3d75f..cb7a5e02 160000 --- a/third-party/rocksdb +++ b/third-party/rocksdb @@ -1 +1 @@ -Subproject commit fcf3d75f3f022a6a55ff1222d6b06f8518d38c7c +Subproject commit cb7a5e02edeb883193eb5b4901d5943f58e9add9 diff --git a/tl-utils/CMakeLists.txt b/tl-utils/CMakeLists.txt index b17b7dc9..d5c52d48 100644 --- a/tl-utils/CMakeLists.txt +++ b/tl-utils/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(TL_UTILS_SOURCE common-utils.hpp diff --git a/tl/CMakeLists.txt b/tl/CMakeLists.txt index 8adabeda..d0760a34 100644 --- a/tl/CMakeLists.txt +++ b/tl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_subdirectory(generate) set_source_files_properties(${TL_TON_API} PROPERTIES GENERATED TRUE) diff --git a/tl/generate/CMakeLists.txt b/tl/generate/CMakeLists.txt index 61d66c93..083d3973 100644 --- a/tl/generate/CMakeLists.txt +++ b/tl/generate/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) file(MAKE_DIRECTORY auto/tl) diff --git a/tl/tl/tl_json.h b/tl/tl/tl_json.h index 489bd6e8..8eee3aad 100644 --- a/tl/tl/tl_json.h +++ b/tl/tl/tl_json.h @@ -108,12 +108,13 @@ inline Status from_json(std::int32_t &to, JsonValue from) { inline Status from_json(bool &to, JsonValue from) { if (from.type() != JsonValue::Type::Boolean) { int32 x; + auto type = from.type(); auto status = from_json(x, std::move(from)); if (status.is_ok()) { to = x != 0; return Status::OK(); } - return Status::Error(PSLICE() << "Expected bool, got " << from.type()); + return Status::Error(PSLICE() << "Expected bool, got " << 
type); } to = from.get_boolean(); return Status::OK(); diff --git a/tonlib/CMakeLists.txt b/tonlib/CMakeLists.txt index 72c59c4d..dc3e9030 100644 --- a/tonlib/CMakeLists.txt +++ b/tonlib/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(BUILD_SHARED_LIBS "Use \"OFF\" for a static build." ON) diff --git a/tonlib/test/offline.cpp b/tonlib/test/offline.cpp index a1e5a0f6..35a95f5b 100644 --- a/tonlib/test/offline.cpp +++ b/tonlib/test/offline.cpp @@ -609,7 +609,30 @@ TEST(Tonlib, ConfigCache) { "seqno": 0, "root_hash": "gj+B8wb/AmlPk1z1AhVI484rhrUpgSr2oSFIh56VoSg=", "file_hash": "Z+IKwYS54DmmJmesw/nAD5DzWadnOCMzee+kdgSYDOg=" - } + }, + "hardforks": [ + { + "file_hash": "jF3RTD+OyOoP+OI9oIjdV6M8EaOh9E+8+c3m5JkPYdg=", + "seqno": 5141579, + "root_hash": "6JSqIYIkW7y8IorxfbQBoXiuY3kXjcoYgQOxTJpjXXA=", + "workchain": -1, + "shard": -9223372036854775808 + }, + { + "file_hash": "WrNoMrn5UIVPDV/ug/VPjYatvde8TPvz5v1VYHCLPh8=", + "seqno": 5172980, + "root_hash": "054VCNNtUEwYGoRe1zjH+9b1q21/MeM+3fOo76Vcjes=", + "workchain": -1, + "shard": -9223372036854775808 + }, + { + "file_hash": "xRaxgUwgTXYFb16YnR+Q+VVsczLl6jmYwvzhQ/ncrh4=", + "seqno": 5176527, + "root_hash": "SoPLqMe9Dz26YJPOGDOHApTSe5i0kXFtRmRh/zPMGuI=", + "workchain": -1, + "shard": -9223372036854775808 + } + ] } })abc"; auto custom = R"abc({ diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 0eabe142..9ec664bb 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -1971,7 +1971,7 @@ class RunEmulator : public TonlibQueryActor { ton::UnixTime now = account_state_->get_sync_time(); bool is_special = address.workchain == ton::masterchainId && config->is_special_smartcontract(address.addr); block::Account account(address.workchain, address.addr.bits()); - if (!account.unpack(std::move(shard_account), td::Ref(), now, is_special)) { + if (!account.unpack(std::move(shard_account), now, is_special)) { check(td::Status::Error("Can't unpack shard account")); return; } diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 3d029c93..0149e06e 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/utils/opcode-timing.cpp b/utils/opcode-timing.cpp index f50d7041..4f5c8ab4 100644 --- a/utils/opcode-timing.cpp +++ b/utils/opcode-timing.cpp @@ -64,7 +64,6 @@ typedef struct { vm::Stack prepare_stack(td::Slice command) { const auto cell = to_cell(command); - vm::init_op_cp0(); vm::DictionaryBase::get_empty_dictionary(); vm::Stack stack; try { @@ -80,7 +79,6 @@ vm::Stack prepare_stack(td::Slice command) { runInfo time_run_vm(td::Slice command, td::Ref stack) { const auto cell = to_cell(command); - vm::init_op_cp0(); vm::DictionaryBase::get_empty_dictionary(); CHECK(stack.is_unique()); try { @@ -171,6 +169,7 @@ int main(int argc, char** argv) { setup = argv[1]; code = argv[2]; } + vm::init_vm().ensure(); const auto time = timeInstruction(setup, code); std::cout << std::fixed << std::setprecision(9) << code << "," << time.runtime.mean << "," << time.runtime.stddev << "," << time.gasUsage.mean << "," << time.gasUsage.stddev << "," << (int)time.errored << std::endl; diff --git a/validator-engine-console/CMakeLists.txt b/validator-engine-console/CMakeLists.txt index 48716960..634a5b8b 100644 --- 
a/validator-engine-console/CMakeLists.txt +++ b/validator-engine-console/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable (validator-engine-console validator-engine-console.cpp validator-engine-console.h validator-engine-console-query.cpp diff --git a/validator-engine/CMakeLists.txt b/validator-engine/CMakeLists.txt index d369a2c3..5df720fe 100644 --- a/validator-engine/CMakeLists.txt +++ b/validator-engine/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp index aaa4f849..23cc9347 100644 --- a/validator-engine/validator-engine.cpp +++ b/validator-engine/validator-engine.cpp @@ -38,7 +38,7 @@ #include "common/errorlog.h" -#include "crypto/vm/cp0.h" +#include "crypto/vm/vm.h" #include "crypto/fift/utils.h" #include "td/utils/filesystem.h" @@ -3774,7 +3774,7 @@ int main(int argc, char *argv[]) { td::actor::Scheduler scheduler({threads}); scheduler.run_in_context([&] { - CHECK(vm::init_op_cp0()); + vm::init_vm().ensure(); x = td::actor::create_actor("validator-engine"); for (auto &act : acts) { act(); diff --git a/validator-session/CMakeLists.txt b/validator-session/CMakeLists.txt index 4931e464..c769f4d8 100644 --- a/validator-session/CMakeLists.txt +++ b/validator-session/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator/CMakeLists.txt b/validator/CMakeLists.txt index 573cd8e5..068569de 100644 --- a/validator/CMakeLists.txt +++ b/validator/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator/impl/CMakeLists.txt b/validator/impl/CMakeLists.txt index 459e7724..f4b967a8 100644 --- a/validator/impl/CMakeLists.txt +++ b/validator/impl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index abbe904c..c2088f47 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -202,7 +202,7 @@ class Collator final : public td::actor::Actor { // block::Account* lookup_account(td::ConstBitPtr addr) const; std::unique_ptr make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra, bool force_create = false); + bool force_create); td::Result make_account(td::ConstBitPtr addr, bool force_create = false); td::actor::ActorId get_self() { return actor_id(this); @@ -269,10 +269,6 @@ class Collator final : public td::actor::Actor { void after_get_external_messages(td::Result>> res); td::Result register_external_message_cell(Ref ext_msg, const ExtMessage::Hash& ext_hash); // td::Result register_external_message(td::Slice ext_msg_boc); - td::Result register_ihr_message_cell(Ref ihr_msg); - td::Result register_ihr_message(td::Slice ihr_msg_boc); - td::Result register_shard_signatures_cell(Ref shard_blk_signatures); - td::Result register_shard_signatures(td::Slice shard_blk_signatures_boc); void register_new_msg(block::NewOutMsg msg); void 
register_new_msgs(block::transaction::Transaction& trans);
   bool process_new_messages(bool enqueue_only = false);
@@ -285,7 +281,7 @@ class Collator final : public td::actor::Actor {
   bool enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt);
   bool enqueue_transit_message(Ref msg, Ref old_msg_env, ton::AccountIdPrefixFull prev_prefix,
                                ton::AccountIdPrefixFull cur_prefix, ton::AccountIdPrefixFull dest_prefix,
-                               td::RefInt256 fwd_fee_remaining, ton::LogicalTime enqueued_lt);
+                               td::RefInt256 fwd_fee_remaining);
   bool delete_out_msg_queue_msg(td::ConstBitPtr key);
   bool insert_in_msg(Ref in_msg);
   bool insert_out_msg(Ref out_msg);
diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp
index 54941f21..d133278f 100644
--- a/validator/impl/collator.cpp
+++ b/validator/impl/collator.cpp
@@ -54,6 +54,20 @@ static inline bool dbg(int c) {
   return true;
 }
 
+/**
+ * Constructs a Collator object.
+ *
+ * @param shard The shard of the new block.
+ * @param is_hardfork A boolean indicating whether the new block is a hardfork.
+ * @param min_ts The minimum UnixTime for the new block.
+ * @param min_masterchain_block_id The minimum reference masterchain block.
+ * @param prev A vector of BlockIdExt representing the previous blocks.
+ * @param validator_set A reference to the ValidatorSet.
+ * @param collator_id The public key of the block creator.
+ * @param manager The ActorId of the ValidatorManager.
+ * @param timeout The timeout for the collator.
+ * @param promise The promise to return the result.
+ */
 Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockIdExt min_masterchain_block_id,
                    std::vector prev, td::Ref validator_set, Ed25519_PublicKey collator_id,
                    td::actor::ActorId manager, td::Timestamp timeout,
@@ -77,6 +91,14 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockId
       }) {
 }
 
+/**
+ * Starts the Collator.
+ *
+ * This function initializes the Collator by performing various checks and queries to the ValidatorManager.
+ * It checks the validity of the shard, the previous blocks, and the workchain.
+ * If all checks pass, it proceeds to query the ValidatorManager for the top masterchain state block, shard states, block data, external messages, and shard blocks.
+ * The results of these queries are handled by corresponding callback functions.
+ */
 void Collator::start_up() {
   LOG(DEBUG) << "Collator for shard " << shard_.to_str() << " started";
   LOG(DEBUG) << "Previous block #1 is " << prev_blocks.at(0).to_str();
@@ -250,10 +272,21 @@ void Collator::start_up() {
   CHECK(pending);
 }
 
+/**
+ * Raises an error when the timeout is reached.
+ */
 void Collator::alarm() {
   fatal_error(ErrorCode::timeout, "timeout");
 }
 
+/**
+ * Generates a string representation of a shard.
+ *
+ * @param workchain The workchain ID of the shard.
+ * @param shard The shard ID.
+ *
+ * @returns A string representation of the shard.
+ */
 std::string show_shard(ton::WorkchainId workchain, ton::ShardId shard) {
   char tmp[128];
   char* ptr = tmp + snprintf(tmp, 31, "%d:", workchain);
@@ -268,14 +301,35 @@ std::string show_shard(ton::WorkchainId workchain, ton::ShardId shard) {
   return {tmp, ptr};
 }
 
+/**
+ * Returns a string representation of the shard of the given block.
+ *
+ * @param blk_id The BlockId object.
+ *
+ * @returns A string representation of the shard.
+ */ std::string show_shard(const ton::BlockId blk_id) { return show_shard(blk_id.workchain, blk_id.shard); } +/** + * Converts a `ShardIdFull` object to a string representation. + * + * @param blk_id The `ShardIdFull` object to convert. + * + * @returns The string representation of the `ShardIdFull` object. + */ std::string show_shard(const ton::ShardIdFull blk_id) { return show_shard(blk_id.workchain, blk_id.shard); } +/** + * Handles a fatal error encountered during block candidate generation. + * + * @param error The error encountered. + * + * @returns False to indicate that a fatal error occurred. + */ bool Collator::fatal_error(td::Status error) { error.ensure_error(); LOG(ERROR) << "cannot generate block candidate for " << show_shard(shard_) << " : " << error.to_string(); @@ -287,14 +341,39 @@ bool Collator::fatal_error(td::Status error) { return false; } +/** + * Handles a fatal error encountered during block candidate generation. + * + * @param err_code The error code. + * @param err_msg The error message. + * + * @returns False to indicate that a fatal error occurred. + */ bool Collator::fatal_error(int err_code, std::string err_msg) { return fatal_error(td::Status::Error(err_code, err_msg)); } +/** + * Handles a fatal error encountered during block candidate generation. + * + * @param err_msg The error message. + * @param err_code The error code. + * + * @returns False to indicate that a fatal error occurred. + */ bool Collator::fatal_error(std::string err_msg, int err_code) { return fatal_error(td::Status::Error(err_code, err_msg)); } +/** + * Checks if there are any pending tasks. + * + * If there are no pending tasks, it continues collation. + * If collation fails, it raises a fatal error. + * If an exception is caught during collation, it raises a fatal error with the corresponding error message. + * + * @returns None + */ void Collator::check_pending() { // LOG(DEBUG) << "pending = " << pending; if (!pending) { @@ -309,6 +388,13 @@ void Collator::check_pending() { } } +/** + * Registers a masterchain state. + * + * @param other_mc_state The masterchain state to register. + * + * @returns True if the registration is successful, false otherwise. + */ bool Collator::register_mc_state(Ref other_mc_state) { if (other_mc_state.is_null() || mc_state_.is_null()) { return false; @@ -334,6 +420,14 @@ bool Collator::register_mc_state(Ref other_mc_state) { return true; } +/** + * Requests the auxiliary masterchain state. + * + * @param seqno The seqno of the block. + * @param state A reference to the auxiliary masterchain state. + * + * @returns True if the auxiliary masterchain state is successfully requested, false otherwise. + */ bool Collator::request_aux_mc_state(BlockSeqno seqno, Ref& state) { if (mc_state_.is_null()) { return fatal_error(PSTRING() << "cannot find masterchain block with seqno " << seqno @@ -366,6 +460,13 @@ bool Collator::request_aux_mc_state(BlockSeqno seqno, Ref& st return true; } +/** + * Retrieves the auxiliary masterchain state for a given block sequence number. + * + * @param seqno The sequence number of the block. + * + * @returns A reference to the auxiliary masterchain state if found, otherwise an empty reference. + */ Ref Collator::get_aux_mc_state(BlockSeqno seqno) const { auto it = aux_mc_states_.find(seqno); if (it != aux_mc_states_.end()) { @@ -375,6 +476,13 @@ Ref Collator::get_aux_mc_state(BlockSeqno seqno) const { } } +/** + * Callback function called after retrieving the auxiliary shard state. 
+ * Handles the retrieved shard state and performs necessary checks and registrations. + * + * @param blkid The BlockIdExt of the shard state. + * @param res The result of retrieving the shard state. + */ void Collator::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result> res) { LOG(DEBUG) << "in Collator::after_get_aux_shard_state(" << blkid.to_str() << ")"; --pending; @@ -400,6 +508,14 @@ void Collator::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result, BlockIdExt>> res) { LOG(DEBUG) << "in Collator::after_get_mc_state()"; --pending; @@ -455,6 +576,12 @@ void Collator::after_get_mc_state(td::Result, Bl check_pending(); } +/** + * Callback function called after retrieving the shard state for a previous block. + * + * @param idx The index of the previous shard block (0 or 1). + * @param res The retrieved shard state. + */ void Collator::after_get_shard_state(int idx, td::Result> res) { LOG(DEBUG) << "in Collator::after_get_shard_state(" << idx << ")"; --pending; @@ -482,6 +609,12 @@ void Collator::after_get_shard_state(int idx, td::Result> res) { check_pending(); } +/** + * Callback function called after retrieving block data for a previous block. + * + * @param idx The index of the previous block (0 or 1). + * @param res The retreived block data. + */ void Collator::after_get_block_data(int idx, td::Result> res) { LOG(DEBUG) << "in Collator::after_get_block_data(" << idx << ")"; --pending; @@ -513,6 +646,11 @@ void Collator::after_get_block_data(int idx, td::Result> res) { check_pending(); } +/** + * Callback function called after retrieving shard block descriptions for masterchain. + * + * @param res The retrieved shard block descriptions. + */ void Collator::after_get_shard_blocks(td::Result>> res) { --pending; if (res.is_error()) { @@ -525,6 +663,11 @@ void Collator::after_get_shard_blocks(td::Resultget_neighbor_shard_hash_ids(shard_); @@ -634,6 +787,12 @@ bool Collator::request_neighbor_msg_queues() { return true; } +/** + * Handles the result of obtaining the outbound queue for a neighbor. + * + * @param i The index of the neighbor. + * @param res The obtained outbound queue. + */ void Collator::got_neighbor_out_queue(int i, td::Result> res) { LOG(DEBUG) << "obtained outbound queue for neighbor #" << i; --pending; @@ -692,6 +851,12 @@ void Collator::got_neighbor_out_queue(int i, td::Result> res) check_pending(); } +/** + * Unpacks and merges the states of two previous blocks. + * Used if the block is after_merge. + * + * @returns True if the unpacking and merging was successful, false otherwise. + */ bool Collator::unpack_merge_last_state() { LOG(DEBUG) << "unpack/merge last states"; // 0. mechanically merge two ShardStateUnsplit into split_state constructor @@ -730,6 +895,12 @@ bool Collator::unpack_merge_last_state() { return import_shard_state_data(ss0); } +/** + * Unpacks the state of the previous block. + * Used if the block is not after_merge. + * + * @returns True if the unpacking is successful, false otherwise. + */ bool Collator::unpack_last_state() { if (after_merge_) { if (!unpack_merge_last_state()) { @@ -749,6 +920,15 @@ bool Collator::unpack_last_state() { import_shard_state_data(ss); } +/** + * Unpacks the state of a previous block and performs necessary checks. + * + * @param ss The ShardState object to unpack the state into. + * @param blkid The BlockIdExt of the previous block. + * @param prev_state_root The root of the state. + * + * @returns True if the unpacking and checks are successful, false otherwise. 
+ */ bool Collator::unpack_one_last_state(block::ShardState& ss, BlockIdExt blkid, Ref prev_state_root) { auto res = ss.unpack_state_ext(blkid, std::move(prev_state_root), global_id_, prev_mc_block_seqno, after_split_, after_split_ | after_merge_, [self = this](ton::BlockSeqno mc_seqno) { @@ -767,6 +947,14 @@ bool Collator::unpack_one_last_state(block::ShardState& ss, BlockIdExt blkid, Re return true; } +/** + * Splits the state of previous block. + * Used if the block is after_split. + * + * @param ss The ShardState object representing the previous state. The result is stored here. + * + * @returns True if the split operation is successful, false otherwise. + */ bool Collator::split_last_state(block::ShardState& ss) { LOG(INFO) << "Splitting previous state " << ss.id_.to_str() << " to subshard " << shard_.to_str(); CHECK(after_split_); @@ -788,11 +976,19 @@ bool Collator::split_last_state(block::ShardState& ss) { return true; } -// SETS: account_dict, shard_libraries_, mc_state_extra -// total_balance_ = old_total_balance_, total_validator_fees_ -// SETS: overload_history_, underload_history_ -// SETS: prev_state_utime_, prev_state_lt_, prev_vert_seqno_ -// SETS: out_msg_queue, processed_upto_, ihr_pending +/** + * Imports the shard state data into the Collator object. + * + * SETS: account_dict, shard_libraries_, mc_state_extra + * total_balance_ = old_total_balance_, total_validator_fees_ + * SETS: overload_history_, underload_history_ + * SETS: prev_state_utime_, prev_state_lt_, prev_vert_seqno_ + * SETS: out_msg_queue, processed_upto_, ihr_pending + * + * @param ss The ShardState object containing the shard state data. + * + * @returns True if the import was successful, False otherwise. + */ bool Collator::import_shard_state_data(block::ShardState& ss) { account_dict = std::move(ss.account_dict_); shard_libraries_ = std::move(ss.shard_libraries_); @@ -813,6 +1009,12 @@ bool Collator::import_shard_state_data(block::ShardState& ss) { return true; } +/** + * Adds trivials neighbor after merging two shards. + * Trivial neighbors are the two previous blocks. + * + * @returns True if the operation is successful, false otherwise. + */ bool Collator::add_trivial_neighbor_after_merge() { LOG(DEBUG) << "in add_trivial_neighbor_after_merge()"; CHECK(prev_blocks.size() == 2); @@ -847,6 +1049,12 @@ bool Collator::add_trivial_neighbor_after_merge() { return true; } +/** + * Adds a trivial neighbor. + * A trivial neighbor is the previous block. + * + * @returns True if the operation is successful, false otherwise. + */ bool Collator::add_trivial_neighbor() { LOG(DEBUG) << "in add_trivial_neighbor()"; if (after_merge_) { @@ -982,6 +1190,15 @@ bool Collator::add_trivial_neighbor() { return true; } +/** + * Checks the previous block against the block registered in the masterchain. + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * @param chk_chain_len Flag indicating whether to check the chain length. + * + * @returns True if the previous block is valid, false otherwise. 
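+ *
+ * A simplified, illustrative sketch of this rule (standalone C++, not the real
+ * collator API; the limit of 8 unregistered blocks is a placeholder, not a
+ * value taken from the configuration):
+ * @code
+ *   #include <cstdint>
+ *
+ *   bool prev_block_is_plausible(std::uint32_t listed_seqno, std::uint32_t prev_seqno,
+ *                                bool chk_chain_len, std::uint32_t max_unregistered = 8) {
+ *     if (listed_seqno > prev_seqno) {
+ *       return false;  // masterchain already lists a newer block of this shard
+ *     }
+ *     if (chk_chain_len && prev_seqno - listed_seqno > max_unregistered) {
+ *       return false;  // too long a chain of blocks not yet registered in the masterchain
+ *     }
+ *     return true;
+ *   }
+ * @endcode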
+ */ bool Collator::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev, bool chk_chain_len) { if (listed.seqno() > prev.seqno()) { return fatal_error(PSTRING() << "cannot generate a shardchain block after previous block " << prev.to_str() @@ -1001,6 +1218,14 @@ bool Collator::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev return true; } +/** + * Checks the previous block against the block registered in the masterchain. + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * + * @returns True if the previous block is equal to the one registered in the masterchain, false otherwise. + */ bool Collator::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev) { if (listed != prev) { return fatal_error(PSTRING() << "cannot generate shardchain block for shard " << shard_.to_str() @@ -1011,6 +1236,11 @@ bool Collator::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt return true; } +/** + * Checks the validity of the shard configuration of the current shard. + * + * @returns True if the shard configuration is valid, false otherwise. + */ bool Collator::check_this_shard_mc_info() { wc_info_ = config_->get_workchain_info(workchain()); if (wc_info_.is_null()) { @@ -1145,6 +1375,11 @@ bool Collator::check_this_shard_mc_info() { return true; } +/** + * Initializes the block limits for the collator. + * + * @returns True if the block limits were successfully initialized, false otherwise. + */ bool Collator::init_block_limits() { CHECK(block_limits_); CHECK(state_usage_tree_); @@ -1153,6 +1388,11 @@ bool Collator::init_block_limits() { return true; } +/** + * Performs pre-initialization steps for the Collator. + * + * @returns True if pre-initialization is successful, false otherwise. + */ bool Collator::do_preinit() { CHECK(prev_blocks.size() == 1U + after_merge_); last_block_seqno = prev_blocks[0].seqno(); @@ -1209,6 +1449,12 @@ bool Collator::do_preinit() { return true; } +/** + * Adjusts the shard configuration by adding new workchains to the shard configuration in the masterchain state. + * Used in masterchain collator. + * + * @returns True if the shard configuration was successfully adjusted, false otherwise. + */ bool Collator::adjust_shard_config() { CHECK(is_masterchain() && config_ && shard_conf_); const block::WorkchainSet& wset = config_->get_workchain_list(); @@ -1237,12 +1483,30 @@ bool Collator::adjust_shard_config() { return true; } +/** + * Compares two ShardTopBlockDescription references based on their block IDs. + * + * @param a The first ShardTopBlockDescription reference. + * @param b The second ShardTopBlockDescription reference. + * + * @returns True if a is considered less than b, false otherwise. + */ static bool cmp_shard_block_descr_ref(const Ref& a, const Ref& b) { BlockId x = a->block_id().id, y = b->block_id().id; return x.workchain < y.workchain || (x.workchain == y.workchain && (x.shard < y.shard || (x.shard == y.shard && x.seqno > y.seqno))); } +/** + * Stores the fees imported from a shard blocks to `fees_import_dict_`. + * Used in masterchain collator. + * + * @param shard The shard identifier. + * @param fees The fees imported from the block. + * @param created The fee for creating shard blocks. + * + * @returns True if the fees were successfully stored, false otherwise. 
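+ *
+ * A simplified model of this bookkeeping (standalone C++; the integer shard key
+ * and `Fees` struct are placeholders for the real dictionary key and
+ * CurrencyCollection values):
+ * @code
+ *   #include <cstdint>
+ *   #include <map>
+ *
+ *   struct Fees {
+ *     std::uint64_t collected = 0;  // fees imported from the shard block
+ *     std::uint64_t created = 0;    // block-creation fees of that shard
+ *   };
+ *
+ *   void store_shard_fees_toy(std::map<std::uint64_t, Fees>& fees_import,
+ *                             std::uint64_t shard_key, std::uint64_t fees,
+ *                             std::uint64_t created) {
+ *     Fees& e = fees_import[shard_key];  // zero-initialized on first use
+ *     e.collected = fees;
+ *     e.created = created;
+ *   }
+ * @endcode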
+ */ bool Collator::store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees, const block::CurrencyCollection& created) { if (shard.is_valid() && fees.is_valid()) { @@ -1258,6 +1522,14 @@ bool Collator::store_shard_fees(ShardIdFull shard, const block::CurrencyCollecti } } +/** + * Stores the fees imported from a shard blocks to `fees_import_dict_`. + * Used in masterchain collator. + * + * @param descr A reference to the McShardHash object containing the shard information. + * + * @returns True if the shard fees and funds created were successfully stored, false otherwise. + */ bool Collator::store_shard_fees(Ref descr) { CHECK(descr.not_null()); CHECK(descr->fees_collected_.is_valid()); @@ -1266,6 +1538,11 @@ bool Collator::store_shard_fees(Ref descr) { return true; } +/** + * Imports new top shard blocks and updates the shard configuration. + * + * @returns True if the import was successful, false otherwise. + */ bool Collator::import_new_shard_top_blocks() { if (shard_block_descr_.empty()) { return true; @@ -1404,6 +1681,13 @@ bool Collator::import_new_shard_top_blocks() { return true; } +/** + * Registers the shard block creators to block_create_count_ + * + * @param creator_list A vector of Bits256 representing the shard block creators. + * + * @returns True if the registration was successful, False otherwise. + */ bool Collator::register_shard_block_creators(std::vector creator_list) { for (const auto& x : creator_list) { LOG(DEBUG) << "registering block creator " << x.to_hex(); @@ -1418,6 +1702,11 @@ bool Collator::register_shard_block_creators(std::vector creator_li return true; } +/** + * Performs pre-initialization and collates the new block. + * + * @returns True if collation is successful, false otherwise. + */ bool Collator::try_collate() { if (!preinit_complete) { LOG(DEBUG) << "running do_preinit()"; @@ -1477,6 +1766,14 @@ bool Collator::try_collate() { return do_collate(); } +/** + * Adjusts one entry from the processed up to information using the masterchain state that is referenced in the entry. + * + * @param proc The MsgProcessedUpto object. + * @param owner The shard that the MsgProcessesUpto information is taken from. + * + * @returns True if the processed up to information was successfully adjusted, false otherwise. + */ bool Collator::fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton::ShardIdFull& owner) { if (proc.compute_shard_end_lt) { return true; @@ -1493,6 +1790,13 @@ bool Collator::fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton:: return (bool)proc.compute_shard_end_lt; } +/** + * Adjusts the processed up to collection using the using the auxilliary masterchain states. + * + * @param upto The MsgProcessedUptoCollection to be adjusted. + * + * @returns True if all entries were successfully adjusted, False otherwise. + */ bool Collator::fix_processed_upto(block::MsgProcessedUptoCollection& upto) { for (auto& entry : upto.list) { if (!fix_one_processed_upto(entry, upto.owner)) { @@ -1502,6 +1806,14 @@ bool Collator::fix_processed_upto(block::MsgProcessedUptoCollection& upto) { return true; } +/** + * Initializes the unix time for the new block. + * + * Unix time is set based on the current time, and the timestamps of the previous blocks. + * If the previous block has a timestamp too far in the past then skipping importing external messages and new shard blocks is allowed. + * + * @returns True if the initialization is successful, false otherwise. 
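+ *
+ * A simplified sketch of the choice described above (standalone C++; the
+ * 60-second staleness threshold is a placeholder, not the configured value):
+ * @code
+ *   #include <algorithm>
+ *   #include <cstdint>
+ *   #include <ctime>
+ *
+ *   struct UtimeDecision {
+ *     std::uint32_t now;      // unixtime chosen for the new block
+ *     bool may_skip_imports;  // allowed to skip external messages / new shard blocks
+ *   };
+ *
+ *   UtimeDecision pick_block_utime(std::uint32_t prev_utime) {
+ *     auto wall = static_cast<std::uint32_t>(std::time(nullptr));
+ *     UtimeDecision d;
+ *     d.now = std::max<std::uint32_t>(wall, prev_utime + 1);  // never earlier than prev + 1
+ *     d.may_skip_imports = wall > prev_utime + 60;            // previous block is far in the past
+ *     return d;
+ *   }
+ * @endcode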
+ */ bool Collator::init_utime() { CHECK(config_); // consider unixtime and lt from previous block(s) of the same shardchain @@ -1547,6 +1859,9 @@ bool Collator::init_utime() { return true; } +/** + * Initializes the logical time of the new block. + */ bool Collator::init_lt() { CHECK(config_); start_lt = config_->lt; @@ -1569,6 +1884,11 @@ bool Collator::init_lt() { return true; } +/** + * Fetches and initializes the configuration parameters using the masterchain configuration. + * + * @returns True if the configuration parameters were successfully fetched and initialized, false otherwise. + */ bool Collator::fetch_config_params() { auto res = block::FetchConfigParams::fetch_config_params(*config_, &old_mparams_, &storage_prices_, &storage_phase_cfg_, @@ -1583,6 +1903,13 @@ bool Collator::fetch_config_params() { return true; } +/** + * Computes the amount of extra currencies to be minted. + * + * @param to_mint A reference to the CurrencyCollection object to store the minted amount. + * + * @returns True if the computation is successful, false otherwise. + */ bool Collator::compute_minted_amount(block::CurrencyCollection& to_mint) { if (!is_masterchain()) { return to_mint.set_zero(); @@ -1633,6 +1960,11 @@ bool Collator::compute_minted_amount(block::CurrencyCollection& to_mint) { return true; } +/** + * Initializes value_flow_ and computes fees for creating the new block. + * + * @returns True if the initialization is successful, false otherwise. + */ bool Collator::init_value_create() { value_flow_.created.set_zero(); value_flow_.minted.set_zero(); @@ -1663,6 +1995,9 @@ bool Collator::init_value_create() { return true; } +/** + * Performs the collation of the new block. + */ bool Collator::do_collate() { // After do_collate started it will not be interrupted by timeout alarm_timestamp() = td::Timestamp::never(); @@ -1786,6 +2121,14 @@ bool Collator::do_collate() { return true; } +/** + * Dequeues an outbound message from the message queue of this shard. + * + * @param msg_envelope The message envelope to dequeue. + * @param delivered_lt The logical time at which the message was delivered. + * + * @returns True if the message was successfully dequeued, false otherwise. + */ bool Collator::dequeue_message(Ref msg_envelope, ton::LogicalTime delivered_lt) { LOG(DEBUG) << "dequeueing outbound message"; vm::CellBuilder cb; @@ -1805,6 +2148,13 @@ bool Collator::dequeue_message(Ref msg_envelope, ton::LogicalTime deli } } +/** + * Cleans up the outbound message queue by removing messages that have already been imported by neighbors. + * + * Cleanup may be interrupted early if it takes too long. + * + * @returns True if the cleanup operation was successful, false otherwise. + */ bool Collator::out_msg_queue_cleanup() { LOG(INFO) << "cleaning outbound queue from messages already imported by neighbors"; if (verbosity >= 2) { @@ -1884,8 +2234,17 @@ bool Collator::out_msg_queue_cleanup() { return register_out_msg_queue_op(true); } +/** + * Creates a new Account object from the given address and serialized account data. + * + * @param addr A pointer to the 256-bit address of the account. + * @param account A cell slice with an account serialized using ShardAccount TLB-scheme. + * @param force_create A flag indicating whether to force the creation of a new account if `account` is null. + * + * @returns A unique pointer to the created Account object, or nullptr if the creation failed. 
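+ *
+ * A simplified model of the create-or-unpack decision (standalone C++;
+ * `StoredAccount` and `ToyAccount` are placeholders, not the real
+ * block::Account API):
+ * @code
+ *   #include <memory>
+ *   #include <optional>
+ *
+ *   struct StoredAccount {};  // stands in for a serialized ShardAccount record
+ *   struct ToyAccount {
+ *     bool init_new(unsigned now) { return true; }  // fresh, empty account
+ *     bool unpack(const StoredAccount& stored, unsigned now, bool special) { return true; }
+ *   };
+ *
+ *   std::unique_ptr<ToyAccount> make_account_from_toy(const std::optional<StoredAccount>& stored,
+ *                                                     unsigned now, bool is_special,
+ *                                                     bool force_create) {
+ *     if (!stored && !force_create) {
+ *       return nullptr;  // nothing to load and creation is not forced
+ *     }
+ *     auto acc = std::make_unique<ToyAccount>();
+ *     bool ok = stored ? acc->unpack(*stored, now, is_special) : acc->init_new(now);
+ *     if (!ok) {
+ *       return nullptr;
+ *     }
+ *     return acc;
+ *   }
+ * @endcode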
+ */ std::unique_ptr Collator::make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra, bool force_create) { + bool force_create) { if (account.is_null() && !force_create) { return nullptr; } @@ -1894,19 +2253,35 @@ std::unique_ptr Collator::make_account_from(td::ConstBitPtr addr if (!ptr->init_new(now_)) { return nullptr; } - } else if (!ptr->unpack(std::move(account), std::move(extra), now_, - is_masterchain() && config_->is_special_smartcontract(addr))) { + } else if (!ptr->unpack(std::move(account), now_, is_masterchain() && config_->is_special_smartcontract(addr))) { return nullptr; } ptr->block_lt = start_lt; return ptr; } +/** + * Looks up an account in the Collator's account map. + * + * @param addr A pointer to the 256-bit address of the account to be looked up. + * + * @returns A pointer to the Account object if found, otherwise returns nullptr. + */ block::Account* Collator::lookup_account(td::ConstBitPtr addr) const { auto found = accounts.find(addr); return found != accounts.end() ? found->second.get() : nullptr; } +/** + * Retreives an Account object from the data in the shard state. + * Accounts are cached in the Collator's map. + * + * @param addr The 256-bit address of the account. + * @param force_create Flag indicating whether to create a new account if it does not exist. + * + * @returns A Result object containing a pointer to the account if found or created successfully, or an error status. + * Returns nullptr if account does not exist and not force_create. + */ td::Result Collator::make_account(td::ConstBitPtr addr, bool force_create) { auto found = lookup_account(addr); if (found) { @@ -1918,7 +2293,7 @@ td::Result Collator::make_account(td::ConstBitPtr addr, bool fo return nullptr; } } - auto new_acc = make_account_from(addr, std::move(dict_entry.first), std::move(dict_entry.second), force_create); + auto new_acc = make_account_from(addr, std::move(dict_entry.first), force_create); if (!new_acc) { return td::Status::Error(PSTRING() << "cannot load account " << addr.to_hex(256) << " from previous state"); } @@ -1934,6 +2309,11 @@ td::Result Collator::make_account(td::ConstBitPtr addr, bool fo return ins.first->second.get(); } +/** + * Combines account transactions and updates the ShardAccountBlocks and ShardAccounts. + * + * @returns True if the operation is successful, false otherwise. + */ bool Collator::combine_account_transactions() { vm::AugmentedDictionary dict{256, block::tlb::aug_ShardAccountBlocks}; for (auto& z : accounts) { @@ -2046,6 +2426,15 @@ bool Collator::combine_account_transactions() { return true; } +/** + * Creates a special transaction to recover a specified amount of currency to a destination address. + * + * @param amount The amount of currency to recover. + * @param dest_addr_cell The cell containing the destination address. + * @param in_msg The reference to the input message. + * + * @returns True if the special transaction was created successfully, false otherwise. + */ bool Collator::create_special_transaction(block::CurrencyCollection amount, Ref dest_addr_cell, Ref& in_msg) { if (amount.is_zero()) { @@ -2086,12 +2475,27 @@ bool Collator::create_special_transaction(block::CurrencyCollection amount, Ref< return true; } +/** + * Creates special transactions for retreiving fees and minted currencies. + * Used in masterchain collator. 
+ * + * @returns True if both special transactions were + */ bool Collator::create_special_transactions() { CHECK(is_masterchain()); return create_special_transaction(value_flow_.recovered, config_->get_config_param(3, 1), recover_create_msg_) && create_special_transaction(value_flow_.minted, config_->get_config_param(2, 0), mint_msg_); } +/** + * Creates a tick-tock transaction for a given smart contract. + * + * @param smc_addr The address of the smart contract. + * @param req_start_lt The requested start logical time for the transaction. + * @param mask The value indicating wheter the thansaction is tick (mask == 2) or tock (mask == 1). + * + * @returns True if the transaction was created successfully, false otherwise. + */ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask) { auto acc_res = make_account(smc_addr.cbits(), false); @@ -2145,6 +2549,13 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t return true; } +/** + * Creates an ordinary transaction using a given message. + * + * @param msg_root The root of the message to be processed serialized using Message TLB-scheme. + * + * @returns The root of the serialized transaction, or an empty reference if the transaction creation fails. + */ Ref Collator::create_ordinary_transaction(Ref msg_root) { ton::StdSmcAddress addr; auto cs = vm::load_cell_slice(msg_root); @@ -2219,8 +2630,23 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { return trans_root; } -// If td::status::error_code == 669 - Fatal Error block can not be produced -// if td::status::error_code == 701 - Transaction can not be included into block, but it's ok (external or too early internal) +/** + * Creates an ordinary transaction using given parameters. + * + * @param msg_root The root of the message to be processed serialized using Message TLB-scheme. + * @param acc The account for which the transaction is being created. + * @param utime The Unix time of the transaction. + * @param lt The minimal logical time of the transaction. + * @param storage_phase_cfg The configuration for the storage phase of the transaction. + * @param compute_phase_cfg The configuration for the compute phase of the transaction. + * @param action_phase_cfg The configuration for the action phase of the transaction. + * @param external Flag indicating if the message is external. + * @param after_lt The logical time after which the transaction should occur. Used only for external messages. + * + * @returns A Result object containing the created transaction. + * Returns error_code == 669 if the error is fatal and the block can not be produced. + * Returns error_code == 701 if the transaction can not be included into block, but it's ok (external or too early internal). + */ td::Result> Collator::impl_create_ordinary_transaction(Ref msg_root, block::Account* acc, UnixTime utime, LogicalTime lt, @@ -2301,6 +2727,11 @@ td::Result> Collator::impl_crea return std::move(trans); } +/** + * Updates the maximum logical time if the given logical time is greater than the current maximum logical time. + * + * @param lt The logical time to be compared. + */ void Collator::update_max_lt(ton::LogicalTime lt) { CHECK(lt >= start_lt); if (lt > max_lt) { @@ -2308,6 +2739,13 @@ void Collator::update_max_lt(ton::LogicalTime lt) { } } +/** + * Updates information on the last processed internal message with a new logical time and hash. + * + * @param new_lt_hash The new logical time and hash pair. 
+ * + * @returns True if the last processed internal message was successfully updated, false otherwise. + */ bool Collator::update_last_proc_int_msg(const std::pair& new_lt_hash) { if (last_proc_int_msg_ < new_lt_hash) { last_proc_int_msg_ = new_lt_hash; @@ -2322,6 +2760,14 @@ bool Collator::update_last_proc_int_msg(const std::pair addr_ref) const { return is_our_address(block::tlb::t_MsgAddressInt.get_prefix(std::move(addr_ref))); } +/** + * Checks if the given account ID prefix belongs to the current shard. + * + * @param addr_pfx The account ID prefix to check. + * + * @returns True if the account ID prefix belongs to the current shard, False otherwise. + */ bool Collator::is_our_address(ton::AccountIdPrefixFull addr_pfx) const { return ton::shard_contains(shard_, addr_pfx); } +/** + * Checks if the given address belongs to the current shard. + * + * @param addr The address to check. + * + * @returns True if the address belongs to the current shard, False otherwise. + */ bool Collator::is_our_address(const ton::StdSmcAddress& addr) const { return ton::shard_contains(get_shard(), addr); } -// 1 = processed, 0 = enqueued, 3 = processed, all future messages must be enqueued +/** + * Processes a message generated in this block. + * + * @param msg The new message to be processed. + * @param enqueue_only Flag indicating whether the message should only be enqueued. + * @param is_special New message if creating a special transaction, nullptr otherwise. + * + * @returns Returns: + * 0 - message was enqueued. + * 1 - message was processed. + * 3 - message was processed, all future messages must be enqueued. + * -1 - error occured. + */ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, Ref* is_special) { Ref src, dest; bool enqueue, external; @@ -2460,11 +2939,22 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R return 1; } -// very similar to enqueue_message(), but for transit messages +/** + * Enqueues a transit message. + * Very similar to enqueue_message(), but for transit messages. + * + * @param msg The message to be enqueued. + * @param old_msg_env The previous message envelope. + * @param prev_prefix The account ID prefix for this shard. + * @param cur_prefix The account ID prefix for the next hop. + * @param dest_prefix The prefix of the destination account ID. + * @param fwd_fee_remaining The remaining forward fee. + * + * @returns True if the transit message is successfully enqueued, false otherwise. + */ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_env, ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix, - ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining, - ton::LogicalTime enqueued_lt) { + ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining) { LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256); bool requeue = is_our_address(prev_prefix); // 1. perform hypercube routing @@ -2535,6 +3025,13 @@ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_ return register_out_msg_queue_op(); } +/** + * Deletes a message from the outbound message queue. + * + * @param key The key of the message to be deleted. + * + * @returns True if the message was successfully deleted, false otherwise. 
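+ *
+ * A toy model of the operation (standalone C++; the map stands in for the
+ * outbound queue dictionary with its 32+64+256-bit keys):
+ * @code
+ *   #include <cstdint>
+ *   #include <map>
+ *
+ *   using QueueKey = std::uint64_t;            // stands in for the 352-bit key
+ *   using ToyQueue = std::map<QueueKey, int>;  // value stands in for an EnqueuedMsg record
+ *
+ *   bool delete_queue_msg_toy(ToyQueue& queue, QueueKey key) {
+ *     auto it = queue.find(key);
+ *     if (it == queue.end()) {
+ *       return false;  // the real code treats a missing entry as a fatal error
+ *     }
+ *     queue.erase(it);  // the real code also registers the queue operation in the block
+ *     return true;
+ *   }
+ * @endcode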
+ */ bool Collator::delete_out_msg_queue_msg(td::ConstBitPtr key) { Ref queue_rec; try { @@ -2550,6 +3047,17 @@ bool Collator::delete_out_msg_queue_msg(td::ConstBitPtr key) { return register_out_msg_queue_op(); } +/** + * Processes an inbound message from a neighbor's outbound queue. + * The message may create a transaction or be enqueued. + * + * @param enq_msg The inbound message serialized using EnqueuedMsg TLB-scheme. + * @param lt The logical time of the message. + * @param key The 32+64+256-bit key of the message. + * @param src_nb The description of the source neighbor shard. + * + * @returns True if the message was processed successfully, false otherwise. + */ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalTime lt, td::ConstBitPtr key, const block::McShardDescr& src_nb) { ton::LogicalTime enqueued_lt = 0; @@ -2676,7 +3184,7 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT // destination is outside our shard, relay transit message // (very similar to enqueue_message()) if (!enqueue_transit_message(std::move(env.msg), std::move(msg_env), cur_prefix, next_prefix, dest_prefix, - std::move(env.fwd_fee_remaining), max_lt)) { + std::move(env.fwd_fee_remaining))) { return fatal_error("cannot enqueue transit internal message with key "s + key.to_hex(352)); } return !our || delete_out_msg_queue_msg(key); @@ -2718,6 +3226,12 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT return true; } +/** + * Processes inbound internal messages from message queues of the neighbors. + * Messages are processed until the normal limit is reached, soft timeout is reached or there are no more messages. + * + * @returns True if the processing was successful, false otherwise. + */ bool Collator::process_inbound_internal_messages() { while (!block_full_ && !nb_out_msgs_->is_eof()) { block_full_ = !block_limit_status_->fits(block::ParamLimits::cl_normal); @@ -2752,6 +3266,12 @@ bool Collator::process_inbound_internal_messages() { return true; } +/** + * Processes inbound external messages. + * Messages are processed until the soft limit is reached, medium timeout is reached or there are no more messages. + * + * @returns True if the processing was successful, false otherwise. + */ bool Collator::process_inbound_external_messages() { if (skip_extmsg_) { LOG(INFO) << "skipping processing of inbound external messages"; @@ -2790,7 +3310,17 @@ bool Collator::process_inbound_external_messages() { return true; } -// 1 = processed, 0 = skipped, 3 = processed, all future messages must be skipped (block overflown) +/** + * Processes an external message. + * + * @param msg The message to be processed serialized as Message TLB-scheme. + * + * @returns The result of processing the message: + * -1 if a fatal error occurred. + * 0 if the message is rejected. + * 1 if the message was processed. + * 3 if the message was processed and all future messages must be skipped (block overflown). + */ int Collator::process_external_message(Ref msg) { auto cs = load_cell_slice(msg); td::RefInt256 fwd_fees; @@ -2827,7 +3357,13 @@ int Collator::process_external_message(Ref msg) { return 1; } -// inserts an InMsg into InMsgDescr +/** + * Inserts an InMsg into the block's InMsgDescr. + * + * @param in_msg The input message to be inserted. + * + * @returns True if the insertion is successful, false otherwise. 
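+ *
+ * Note that the growing InMsgDescr dictionary root is re-accounted against the
+ * block limits only on every 64th insertion (the `(in_descr_cnt_ & 63)`
+ * short-circuit below), which keeps the bookkeeping cheap. A simplified sketch
+ * of that batching (standalone C++, placeholder types):
+ * @code
+ *   #include <cstdint>
+ *
+ *   struct ToyLimitStatus {
+ *     std::uint64_t accounted_cells = 0;
+ *     void add_cell() { ++accounted_cells; }  // stands in for block_limit_status_->add_cell(...)
+ *   };
+ *
+ *   void account_in_msg_toy(std::uint64_t& in_descr_cnt, ToyLimitStatus& limits) {
+ *     ++in_descr_cnt;
+ *     if ((in_descr_cnt & 63) == 0) {  // every 64th message
+ *       limits.add_cell();             // re-account the dictionary root cell
+ *     }
+ *   }
+ * @endcode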
+ */ bool Collator::insert_in_msg(Ref in_msg) { if (verbosity > 2) { std::cerr << "InMsg being inserted into InMsgDescr: "; @@ -2862,7 +3398,13 @@ bool Collator::insert_in_msg(Ref in_msg) { ((in_descr_cnt_ & 63) || block_limit_status_->add_cell(in_msg_dict->get_root_cell())); } -// inserts an OutMsg into OutMsgDescr +/** + * Inserts an OutMsg into the block's OutMsgDescr. + * + * @param out_msg The outgoing message to be inserted. + * + * @returns True if the insertion was successful, false otherwise. + */ bool Collator::insert_out_msg(Ref out_msg) { if (verbosity > 2) { std::cerr << "OutMsg being inserted into OutMsgDescr: "; @@ -2885,6 +3427,14 @@ bool Collator::insert_out_msg(Ref out_msg) { return insert_out_msg(std::move(out_msg), msg->get_hash().bits()); } +/** + * Inserts an outgoing message into the block's OutMsgDescr dictionary. + * + * @param out_msg The outgoing message to be inserted. + * @param msg_hash The 256-bit hash of the outgoing message. + * + * @returns True if the insertion was successful, false otherwise. + */ bool Collator::insert_out_msg(Ref out_msg, td::ConstBitPtr msg_hash) { bool ok; try { @@ -2901,7 +3451,15 @@ bool Collator::insert_out_msg(Ref out_msg, td::ConstBitPtr msg_hash) { ((out_descr_cnt_ & 63) || block_limit_status_->add_cell(out_msg_dict->get_root_cell())); } -// enqueues a new Message into OutMsgDescr and OutMsgQueue +/** + * Enqueues a new message into the block's outbound message queue and OutMsgDescr. + * + * @param msg The new outbound message to enqueue. + * @param fwd_fees_remaining The remaining forward fees for the message. + * @param enqueued_lt The logical time at which the message is enqueued. + * + * @returns True if the message was successfully enqueued, false otherwise. + */ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt) { // 0. unpack src_addr and dest_addr block::gen::CommonMsgInfo::Record_int_msg_info info; @@ -2966,6 +3524,13 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema return register_out_msg_queue_op(); } +/** + * Processes new messages that were generated in this block. + * + * @param enqueue_only If true, only enqueue the new messages without creating transactions. + * + * @returns True if all new messages were processed successfully, false otherwise. + */ bool Collator::process_new_messages(bool enqueue_only) { while (!new_msgs.empty()) { block::NewOutMsg msg = new_msgs.top(); @@ -2987,6 +3552,11 @@ bool Collator::process_new_messages(bool enqueue_only) { return true; } +/** + * Registers a new output message. + * + * @param new_msg The new output message to be registered. + */ void Collator::register_new_msg(block::NewOutMsg new_msg) { if (new_msg.lt < min_new_msg_lt) { min_new_msg_lt = new_msg.lt; @@ -2995,6 +3565,11 @@ void Collator::register_new_msg(block::NewOutMsg new_msg) { block_limit_status_->extra_out_msgs++; } +/** + * Registers new messages that were created in the transaction. + * + * @param trans The transaction containing the messages. + */ void Collator::register_new_msgs(block::transaction::Transaction& trans) { CHECK(trans.root.not_null()); for (unsigned i = 0; i < trans.out_msgs.size(); i++) { @@ -3008,6 +3583,15 @@ void Collator::register_new_msgs(block::transaction::Transaction& trans) { * */ +/** + * Stores an external block reference to a CellBuilder object. + * + * @param cb The CellBuilder object to store the reference in. + * @param id_ext The block ID. 
+ * @param end_lt The end logical time of the block. + * + * @returns True if the reference was successfully stored, false otherwise. + */ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, ton::LogicalTime end_lt) { return cb.store_long_bool(end_lt, 64) // end_lt:uint64 && cb.store_long_bool(id_ext.seqno(), 32) // seq_no:uint32 @@ -3015,6 +3599,15 @@ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, to && cb.store_bits_bool(id_ext.file_hash); // file_hash:bits256 } +/** + * Stores an external block reference to a CellBuilder. + * + * @param cb The CellBuilder to store the reference in. + * @param id_ext The block ID. + * @param blk_root The root of the block. + * + * @returns True if the reference was successfully stored, false otherwise. + */ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, Ref blk_root) { block::gen::Block::Record rec; block::gen::BlockInfo::Record info; @@ -3029,6 +3622,19 @@ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, Re && store_ext_blk_ref_to(cb, id_ext, info.end_lt); // store } +/** + * Updates one shard description in the masterchain shard configuration. + * Used in masterchain collator. + * + * @param info The shard information to be updated. + * @param sibling The sibling shard information. + * @param wc_info The workchain information. + * @param now The current Unix time. + * @param ccvc The Catchain validators configuration. + * @param update_cc Flag indicating whether to update the Catchain seqno. + * + * @returns A boolean value indicating whether the shard description has changed. + */ static int update_one_shard(block::McShardHash& info, const block::McShardHash* sibling, const block::WorkchainInfo* wc_info, ton::UnixTime now, const block::CatchainValidatorsConfig& ccvc, bool update_cc) { @@ -3081,6 +3687,16 @@ static int update_one_shard(block::McShardHash& info, const block::McShardHash* return changed; } +/** + * Updates the shard configuration in the masterchain. + * Used in masterchain collator. + * + * @param wc_set The set of workchains. + * @param ccvc The Catchain validators configuration. + * @param update_cc A boolean indicating whether to update the Catchain seqno. + * + * @returns True if the shard configuration was successfully updated, false otherwise. + */ bool Collator::update_shard_config(const block::WorkchainSet& wc_set, const block::CatchainValidatorsConfig& ccvc, bool update_cc) { LOG(DEBUG) << "updating shard configuration (update_cc=" << update_cc << ")"; @@ -3107,6 +3723,12 @@ bool Collator::update_shard_config(const block::WorkchainSet& wc_set, const bloc }); } +/** + * Creates McStateExtra. + * Used in masterchain collator. + * + * @returns True if the creation is successful, false otherwise. + */ bool Collator::create_mc_state_extra() { if (!is_masterchain()) { CHECK(mc_state_extra_.is_null()); @@ -3325,6 +3947,16 @@ bool Collator::create_mc_state_extra() { return true; } +/** + * Updates the `block_creator_stats_` for a given key. + * Used in masterchain collator. + * + * @param key The 256-bit key of the creator. + * @param shard_incr The increment value for the shardchain block counter. + * @param mc_incr The increment value for the masterchain block counter. + * + * @returns True if the block creator count was successfully updated, false otherwise. 
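+ *
+ * A simplified model of this bookkeeping (standalone C++; the real CreatorStats
+ * use time-discounted counters stored in a dictionary keyed by the 256-bit
+ * creator key, which is omitted here):
+ * @code
+ *   #include <cstdint>
+ *   #include <map>
+ *
+ *   struct ToyCreatorStats {
+ *     std::uint64_t mc_blocks = 0;     // masterchain blocks created
+ *     std::uint64_t shard_blocks = 0;  // shardchain blocks created
+ *   };
+ *
+ *   void update_creator_count_toy(std::map<std::uint64_t, ToyCreatorStats>& stats,
+ *                                 std::uint64_t creator_key,
+ *                                 unsigned shard_incr, unsigned mc_incr) {
+ *     ToyCreatorStats& rec = stats[creator_key];  // zero-initialized on first use
+ *     rec.mc_blocks += mc_incr;
+ *     rec.shard_blocks += shard_incr;
+ *   }
+ * @endcode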
+ */ bool Collator::update_block_creator_count(td::ConstBitPtr key, unsigned shard_incr, unsigned mc_incr) { LOG(DEBUG) << "increasing CreatorStats for " << key.to_hex(256) << " by (" << mc_incr << ", " << shard_incr << ")"; block::DiscountedCounter mc_cnt, shard_cnt; @@ -3351,6 +3983,17 @@ bool Collator::update_block_creator_count(td::ConstBitPtr key, unsigned shard_in return true; } +/** + * Determines if the creator count is outdated for a given key. + * Used in masterchain collator. + * + * @param key The key of the creator. + * @param cs The CellSlice containing the CreatorStats. + * + * @returns -1 if there was a fatal error. + * 0 if the CreatorStats should be removed as they are stale, + * 1 if the CreatorStats are still valid. + */ int Collator::creator_count_outdated(td::ConstBitPtr key, vm::CellSlice& cs) { block::DiscountedCounter mc_cnt, shard_cnt; if (!(block::fetch_CreatorStats(cs, mc_cnt, shard_cnt) && cs.empty_ext())) { @@ -3369,6 +4012,11 @@ int Collator::creator_count_outdated(td::ConstBitPtr key, vm::CellSlice& cs) { } } +/** + * Updates `block_create_stats_` using information about creators of all new blocks. + * + * @returns True if the update was successful, false otherwise. + */ bool Collator::update_block_creator_stats() { if (!create_stats_enabled_) { return true; @@ -3419,10 +4067,25 @@ bool Collator::update_block_creator_stats() { return cnt >= 0; } +/** + * Retrieves the global masterchain config from the config contract. + * + * @param cfg_addr The address of the configuration smart contract. + * + * @returns A Result object containing a reference to the configuration data. + */ td::Result> Collator::get_config_data_from_smc(const ton::StdSmcAddress& cfg_addr) { return block::get_config_data_from_smc(account_dict->lookup_ref(cfg_addr)); } +/** + * Fetches and validates a new configuration from the configuration smart contract. + * + * @param cfg_addr The address of the configuration smart contract. + * @param new_config A reference to a vm::Cell object to store the new configuration. + * + * @returns True if the new configuration was successfully fetched, false otherwise. + */ bool Collator::try_fetch_new_config(const ton::StdSmcAddress& cfg_addr, Ref& new_config) { auto cfg_res = get_config_data_from_smc(cfg_addr); if (cfg_res.is_error()) { @@ -3440,11 +4103,24 @@ bool Collator::try_fetch_new_config(const ton::StdSmcAddress& cfg_addr, Reflookup(key, 256); @@ -3527,6 +4213,17 @@ bool Collator::remove_public_library(td::ConstBitPtr key, td::ConstBitPtr addr) return true; } +/** + * Processes adding a library to the collection of public libraries of an account. + * Updates the global collection of public libraries. + * Used in masterchain collator. + * + * @param key The key of the public library. + * @param addr The address of the account where the library is added. + * @param library The root cell of the library. + * + * @returns True if the public library was successfully added, false otherwise. + */ bool Collator::add_public_library(td::ConstBitPtr key, td::ConstBitPtr addr, Ref library) { LOG(INFO) << "Adding public library " << key.to_hex(256) << " of account " << addr.to_hex(256); CHECK(library.not_null() && !library->get_hash().bits().compare(key, 256)); @@ -3565,6 +4262,17 @@ bool Collator::add_public_library(td::ConstBitPtr key, td::ConstBitPtr addr, Ref return true; } +/** + * Processes changes in libraries of an account. + * Updates the global collection of public libraries. + * Used in masterchain collator. 
+ * + * @param orig_libs The original libraries of the account. + * @param final_libs The final libraries of the account. + * @param addr The address associated with the account. + * + * @returns True if the update was successful, false otherwise. + */ bool Collator::update_account_public_libraries(Ref orig_libs, Ref final_libs, const td::Bits256& addr) { vm::Dictionary dict1{std::move(orig_libs), 256}, dict2{std::move(final_libs), 256}; @@ -3582,6 +4290,13 @@ bool Collator::update_account_public_libraries(Ref orig_libs, Ref msg_q_info; vm::CellBuilder cb, cb2; @@ -3676,11 +4411,20 @@ bool Collator::create_shard_state() { return true; } -// stores BlkMasterInfo (for non-masterchain blocks) +/** + * Stores BlkMasterInfo (for non-masterchain blocks) in the provided CellBuilder. + * + * @param cb The CellBuilder to store the reference in. + * + * @returns True if the reference is successfully stored, false otherwise. + */ bool Collator::store_master_ref(vm::CellBuilder& cb) { return mc_block_root.not_null() && store_ext_blk_ref_to(cb, mc_block_id_, mc_block_root); } +/** + * Updates the processed_upto information for the new block based on the information on the last processed inbound message. + */ bool Collator::update_processed_upto() { auto ref_mc_seqno = is_masterchain() ? new_block_seqno : prev_mc_block_seqno; update_min_mc_seqno(ref_mc_seqno); @@ -3695,6 +4439,13 @@ bool Collator::update_processed_upto() { return processed_upto_->compactify(); } +/** + * Computes the outbound message queue. + * + * @param out_msg_queue_info A reference to a vm::Cell object to store the computed queue. + * + * @returns True if the computation is successful, False otherwise. + */ bool Collator::compute_out_msg_queue_info(Ref& out_msg_queue_info) { if (verbosity >= 2) { auto rt = out_msg_queue_->get_root(); @@ -3709,6 +4460,11 @@ bool Collator::compute_out_msg_queue_info(Ref& out_msg_queue_info) { && cb.finalize_to(out_msg_queue_info); } +/** + * Computes the total balance of the shard state. + * + * @returns True if the total balance computation is successful, false otherwise. + */ bool Collator::compute_total_balance() { // 1. compute total_balance_ from the augmentation value of ShardAccounts auto accounts_extra = account_dict->get_root_extra(); @@ -3759,6 +4515,13 @@ bool Collator::compute_total_balance() { return true; } +/** + * Creates BlockInfo of the new block. + * + * @param block_info A reference to the cell to put the serialized info to. + * + * @returns True if the block info cell was successfully created, false otherwise. + */ bool Collator::create_block_info(Ref& block_info) { vm::CellBuilder cb, cb2; bool mc = is_masterchain(); @@ -3796,10 +4559,24 @@ bool Collator::create_block_info(Ref& block_info) { && cb.finalize_to(block_info); } +/** + * Stores the version information in a CellBuilder. + * + * @param cb The CellBuilder object to store the version information. + * + * @returns True if the version information was successfully stored, false otherwise. + */ bool Collator::store_version(vm::CellBuilder& cb) const { return block::gen::t_GlobalVersion.pack_capabilities(cb, supported_version(), supported_capabilities()); } +/** + * Stores the zero state reference in the given CellBuilder. + * + * @param cb The CellBuilder to store the zero state reference in. + * + * @returns True if the zero state reference is successfully stored, false otherwise. 
+ */ bool Collator::store_zero_state_ref(vm::CellBuilder& cb) { CHECK(prev_state_root_.not_null()); RootHash root_hash = prev_state_root_->get_hash().bits(); @@ -3812,6 +4589,14 @@ bool Collator::store_zero_state_ref(vm::CellBuilder& cb) { && cb.store_bits_bool(prev_blocks[0].file_hash); // file_hash:bits256 } +/** + * Stores the previous block references to the given CellBuilder. + * + * @param cb The CellBuilder object to store the references. + * @param is_after_merge A boolean indicating whether the new block after a merge. + * + * @returns True if the references are successfully stored, false otherwise. + */ bool Collator::store_prev_blk_ref(vm::CellBuilder& cb, bool is_after_merge) { if (is_after_merge) { auto root2 = prev_block_data.at(1)->root_cell(); @@ -3829,6 +4614,11 @@ bool Collator::store_prev_blk_ref(vm::CellBuilder& cb, bool is_after_merge) { } } +/** + * Validates the value flow of the block. + * + * @returns True if the value flow is correct, false otherwise. + */ bool Collator::check_value_flow() { if (!value_flow_.validate()) { LOG(ERROR) << "incorrect value flow in new block : " << value_flow_.to_str(); @@ -3838,6 +4628,13 @@ bool Collator::check_value_flow() { return true; } +/** + * Creates the BlockExtra of the new block. + * + * @param block_extra A reference to the cell to put the serialized info to. + * + * @returns True if the block extra data was successfully created, false otherwise. + */ bool Collator::create_block_extra(Ref& block_extra) { bool mc = is_masterchain(); Ref mc_block_extra; @@ -3853,6 +4650,14 @@ bool Collator::create_block_extra(Ref& block_extra) { && cb.finalize_to(block_extra); // = BlockExtra; } +/** + * Creates the McBlockExtra of the new masterchain block. + * Used in masterchain collator. + * + * @param mc_block_extra A reference to the cell to put the serialized info to. + * + * @returns True if the extra data was successfully created, false otherwise. + */ bool Collator::create_mc_block_extra(Ref& mc_block_extra) { if (!is_masterchain()) { return false; @@ -3870,6 +4675,18 @@ bool Collator::create_mc_block_extra(Ref& mc_block_extra) { && cb.finalize_to(mc_block_extra); // = McBlockExtra } +/** + * Serialized the new block. + * + * This function performs the following steps: + * 1. Creates a BlockInfo for the new block. + * 2. Checks the value flow for the new block. + * 3. Creates a BlockExtra for the new block. + * 4. Builds a new block using the created BlockInfo, value flow, state update, and BlockExtra. + * 5. Verifies the new block if the verification is enabled. + * + * @returns True if the new block is successfully created, false otherwise. + */ bool Collator::create_block() { Ref block_info, extra; if (!create_block_info(block_info)) { @@ -3908,6 +4725,15 @@ bool Collator::create_block() { return true; } +/** + * Collates the shard block description set. + * Used in masterchain collator. + * + * This function creates a dictionary and populates it with the shard block descriptions. + * + * @returns A `Ref` containing the serialized `TopBlockDescrSet` record. + * If serialization fails, an empty `Ref` is returned. + */ Ref Collator::collate_shard_block_descr_set() { vm::Dictionary dict{96}; for (const auto& descr : used_shard_block_descr_) { @@ -3932,6 +4758,11 @@ Ref Collator::collate_shard_block_descr_set() { return cell; } +/** + * Creates collated data for the block. + * + * @returns True if the collated data was successfully created, false otherwise. 
+ */ bool Collator::create_collated_data() { // TODO: store something into collated_roots_ // 1. store the set of used shard block descriptions @@ -3947,6 +4778,18 @@ bool Collator::create_collated_data() { return true; } +/** + * Creates a block candidate for the Collator. + * + * This function serializes the new block and collated data, and creates a BlockCandidate object + * with the necessary information. It then checks if the size of the block candidate exceeds the + * limits specified in the consensus configuration. + * + * Finally, the block candidate is saved to the disk. + * If there are any bad external messages or delayed external messages, the ValidatorManager is called to handle them. + * + * @returns True if the block candidate was created successfully, false otherwise. + */ bool Collator::create_block_candidate() { // 1. serialize block LOG(INFO) << "serializing new Block"; @@ -4021,6 +4864,11 @@ bool Collator::create_block_candidate() { return true; } +/** + * Returns a block candidate to the Promise. + * + * @param saved The result of saving the block candidate to the disk. + */ void Collator::return_block_candidate(td::Result saved) { // 6. return data to the original "caller" if (saved.is_error()) { @@ -4042,6 +4890,18 @@ void Collator::return_block_candidate(td::Result saved) { * */ +/** + * Registers an external message to the list of external messages in the Collator. + * + * @param ext_msg The reference to the external message cell. + * @param ext_hash The hash of the external message. + * + * @returns Result indicating the success or failure of the registration. + * - If the external message is invalid, returns an error. + * - If the external message has been previously rejected, returns an error + * - If the external message has been previuosly registered and accepted, returns false. + * - Otherwise returns true. + */ td::Result Collator::register_external_message_cell(Ref ext_msg, const ExtMessage::Hash& ext_hash) { if (ext_msg->get_level() != 0) { return td::Status::Error("external message must have zero level"); @@ -4090,23 +4950,11 @@ td::Result Collator::register_external_message_cell(Ref ext_msg, return true; } -/* -td::Result Collator::register_external_message(td::Slice ext_msg_boc) { - if (ext_msg_boc.size() > max_ext_msg_size) { - return td::Status::Error("external message too large, rejecting"); - } - vm::BagOfCells boc; - auto res = boc.deserialize(ext_msg_boc); - if (res.is_error()) { - return res.move_as_error(); - } - if (boc.get_root_count() != 1) { - return td::Status::Error("external message is not a valid bag of cells"); // not a valid bag-of-Cells - } - return register_external_message_cell(boc.get_root_cell(0)); -} -*/ - +/** + * Callback function called after retrieving external messages. + * + * @param res The result of the external message retrieval operation. 
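+ *
+ * Like the other `after_get_*` callbacks, it follows the collator's
+ * pending-requests pattern: decrement `pending`, abort on error, then call
+ * check_pending() so that collation resumes once all requested data has
+ * arrived. A simplified sketch of that pattern (standalone C++, placeholder
+ * names):
+ * @code
+ *   #include <functional>
+ *   #include <string>
+ *
+ *   struct ToyCollator {
+ *     int pending = 0;
+ *     std::function<void(std::string)> fatal_error;  // placeholder error handler
+ *     std::function<void()> continue_collation;      // placeholder next stage
+ *
+ *     void check_pending() {
+ *       if (pending == 0) {
+ *         continue_collation();  // all outstanding requests have completed
+ *       }
+ *     }
+ *
+ *     void on_result(bool ok) {
+ *       --pending;
+ *       if (!ok) {
+ *         fatal_error("request failed");
+ *         return;
+ *       }
+ *       // ... store the received data ...
+ *       check_pending();
+ *     }
+ *   };
+ * @endcode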
+ */ void Collator::after_get_external_messages(td::Result>> res) { --pending; if (res.is_error()) { @@ -4130,44 +4978,6 @@ void Collator::after_get_external_messages(td::Result Collator::register_ihr_message_cell(Ref ihr_msg) { - return false; -} - -td::Result Collator::register_ihr_message(td::Slice ihr_msg_boc) { - if (ihr_msg_boc.size() > max_ihr_msg_size) { - return td::Status::Error("IHR message too large, rejecting"); - } - vm::BagOfCells boc; - auto res = boc.deserialize(ihr_msg_boc); - if (res.is_error()) { - return res.move_as_error(); - } - if (boc.get_root_count() != 1) { - return td::Status::Error("IHR message is not a valid bag of cells"); // not a valid bag-of-Cells - } - return register_ihr_message_cell(boc.get_root_cell(0)); -} - -td::Result Collator::register_shard_signatures_cell(Ref signatures) { - return false; -} - -td::Result Collator::register_shard_signatures(td::Slice signatures_boc) { - if (signatures_boc.size() > max_blk_sign_size) { - return td::Status::Error("Shardchain signatures block too large, rejecting"); - } - vm::BagOfCells boc; - auto res = boc.deserialize(signatures_boc); - if (res.is_error()) { - return res.move_as_error(); - } - if (boc.get_root_count() != 1) { - return td::Status::Error("Shardchain signatures block is not a valid bag of cells"); // not a valid bag-of-Cells - } - return register_shard_signatures_cell(boc.get_root_cell(0)); -} - } // namespace validator } // namespace ton diff --git a/validator/impl/external-message.cpp b/validator/impl/external-message.cpp index 9383e734..5c1848aa 100644 --- a/validator/impl/external-message.cpp +++ b/validator/impl/external-message.cpp @@ -114,7 +114,7 @@ void ExtMessageQ::run_message(td::BufferSlice data, block::SizeLimitsConfig::Ext auto utime = std::get<1>(tuple); auto lt = std::get<2>(tuple); auto config = std::move(std::get<3>(tuple)); - if (!acc.unpack(shard_acc, {}, utime, false)) { + if (!acc.unpack(shard_acc, utime, false)) { promise.set_error(td::Status::Error(PSLICE() << "Failed to unpack account state")); } else { auto status = run_message_on_account(wc, &acc, utime, lt + 1, msg_root, std::move(config)); diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 4e4100f5..b134e1b3 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -39,6 +39,11 @@ namespace validator { using td::Ref; using namespace std::literals::string_literals; +/** + * Converts the error context to a string representation to show it in case of validation error. + * + * @returns The error context as a string. + */ std::string ErrorCtx::as_string() const { std::string a; for (const auto& s : entries_) { @@ -48,6 +53,20 @@ std::string ErrorCtx::as_string() const { return a; } +/** + * Constructs a ValidateQuery object. + * + * @param shard The shard of the block being validated. + * @param min_ts The minimum allowed UnixTime for the block. + * @param min_masterchain_block_id The minimum allowed masterchain block reference for the block. + * @param prev A vector of BlockIdExt representing the previous blocks. + * @param candidate The BlockCandidate to be validated. + * @param validator_set A reference to the ValidatorSet. + * @param manager The ActorId of the ValidatorManager. + * @param timeout The timeout for the validation. + * @param promise The Promise to return the ValidateCandidateResult to. + * @param is_fake A boolean indicating if the validation is fake (performed when creating a hardfork). 
+ */ ValidateQuery::ValidateQuery(ShardIdFull shard, UnixTime min_ts, BlockIdExt min_masterchain_block_id, std::vector prev, BlockCandidate candidate, Ref validator_set, td::actor::ActorId manager, td::Timestamp timeout, @@ -71,14 +90,30 @@ ValidateQuery::ValidateQuery(ShardIdFull shard, UnixTime min_ts, BlockIdExt min_ proc_hash_.zero(); } +/** + * Raises an error when timeout is reached. + */ void ValidateQuery::alarm() { abort_query(td::Status::Error(ErrorCode::timeout, "timeout")); } +/** + * Aborts the validation with the given error. + * + * @param error The error encountered. + */ void ValidateQuery::abort_query(td::Status error) { (void)fatal_error(std::move(error)); } +/** + * Rejects the validation and logs an error message. + * + * @param error The error message to be logged. + * @param reason The reason for rejecting the validation. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::reject_query(std::string error, td::BufferSlice reason) { error = error_ctx() + error; LOG(ERROR) << "REJECT: aborting validation of block candidate for " << shard_.to_str() << " : " << error; @@ -94,11 +129,28 @@ bool ValidateQuery::reject_query(std::string error, td::BufferSlice reason) { return false; } +/** + * Rejects the validation and logs an error message. + * + * @param err_msg The error message to be displayed. + * @param error The error status. + * @param reason The reason for rejecting the query. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::reject_query(std::string err_msg, td::Status error, td::BufferSlice reason) { error.ensure_error(); return reject_query(err_msg + " : " + error.to_string(), std::move(reason)); } +/** + * Rejects the validation and logs an error message. + * + * @param error The error message to be logged. + * @param reason The reason for rejecting the validation. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::soft_reject_query(std::string error, td::BufferSlice reason) { error = error_ctx() + error; LOG(ERROR) << "SOFT REJECT: aborting validation of block candidate for " << shard_.to_str() << " : " << error; @@ -114,6 +166,13 @@ bool ValidateQuery::soft_reject_query(std::string error, td::BufferSlice reason) return false; } +/** + * Handles a fatal error during validation. + * + * @param error The error status. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(td::Status error) { error.ensure_error(); LOG(ERROR) << "aborting validation of block candidate for " << shard_.to_str() << " : " << error.to_string(); @@ -132,19 +191,47 @@ bool ValidateQuery::fatal_error(td::Status error) { return false; } +/** + * Handles a fatal error during validation. + * + * @param err_code Error code. + * @param error Error message. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(int err_code, std::string err_msg) { return fatal_error(td::Status::Error(err_code, error_ctx() + err_msg)); } +/** + * Handles a fatal error during validation. + * + * @param err_code Error code. + * @param err_msg Error message. + * @param error Error status. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(int err_code, std::string err_msg, td::Status error) { error.ensure_error(); return fatal_error(err_code, err_msg + " : " + error.to_string()); } +/** + * Handles a fatal error during validation. + * + * @param error Error message. 
+ * @param err_code Error code. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(std::string err_msg, int err_code) { return fatal_error(td::Status::Error(err_code, error_ctx() + err_msg)); } +/** + * Finishes the query and sends the result to the promise. + */ void ValidateQuery::finish_query() { if (main_promise) { main_promise.set_result(now_); @@ -158,6 +245,12 @@ void ValidateQuery::finish_query() { * */ +/** + * Starts the validation process. + * + * This function performs various checks on the validation parameters and the block candidate. + * Then the function also sends requests to the ValidatorManager to fetch blocks and shard stated. + */ void ValidateQuery::start_up() { LOG(INFO) << "validate query for " << block_candidate.id.to_str() << " started"; alarm_timestamp() = timeout; @@ -307,7 +400,16 @@ void ValidateQuery::start_up() { CHECK(pending); } -// unpack block candidate, and check root hash and file hash +/** + * Unpacks and validates a block candidate. + * + * This function unpacks the block candidate data and performs various validation checks to ensure its integrity. + * It checks the file hash and root hash of the block candidate against the expected values. + * It then parses the block header and checks its validity. + * Finally, it deserializes the collated data and extracts the collated roots. + * + * @returns True if the block candidate was successfully unpacked, false otherwise. + */ bool ValidateQuery::unpack_block_candidate() { vm::BagOfCells boc1, boc2; // 1. deserialize block itself @@ -359,6 +461,11 @@ bool ValidateQuery::unpack_block_candidate() { return extract_collated_data(); } +/** + * Initializes the validation by parsing and checking the block header. + * + * @returns True if the initialization is successful, false otherwise. + */ bool ValidateQuery::init_parse() { CHECK(block_root_.not_null()); std::vector prev_blks; @@ -486,6 +593,14 @@ bool ValidateQuery::init_parse() { return true; } +/** + * Extracts collated data from a cell. + * + * @param croot The root cell containing the collated data. + * @param idx The index of the root. + * + * @returns True if the extraction is successful, false otherwise. + */ bool ValidateQuery::extract_collated_data_from(Ref croot, int idx) { bool is_special = false; auto cs = vm::load_cell_slice_special(croot, is_special); @@ -523,7 +638,11 @@ bool ValidateQuery::extract_collated_data_from(Ref croot, int idx) { return true; } -// processes further and sorts data in collated_roots_ +/** + * Extracts collated data from a list of collated roots. + * + * @returns True if the extraction is successful, False otherwise. + */ bool ValidateQuery::extract_collated_data() { int i = -1; for (auto croot : collated_roots_) { @@ -542,6 +661,11 @@ bool ValidateQuery::extract_collated_data() { return true; } +/** + * Callback function called after retrieving the latest masterchain state. + * + * @param res The result of the retrieval of the latest masterchain state. 
+ */ void ValidateQuery::after_get_latest_mc_state(td::Result, BlockIdExt>> res) { LOG(DEBUG) << "in ValidateQuery::after_get_latest_mc_state()"; --pending; @@ -578,6 +702,11 @@ void ValidateQuery::after_get_latest_mc_state(td::Result> res) { LOG(DEBUG) << "in ValidateQuery::after_get_mc_state() for " << mc_blkid_.to_str(); --pending; @@ -596,6 +725,11 @@ void ValidateQuery::after_get_mc_state(td::Result> res) { } } +/** + * Callback function for handling the result of retrieving a masterchain block handle referenced in the block. + * + * @param res The result of retrieving the masterchain block handle. + */ void ValidateQuery::got_mc_handle(td::Result res) { LOG(DEBUG) << "in ValidateQuery::got_mc_handle() for " << mc_blkid_.to_str(); --pending; @@ -611,6 +745,12 @@ void ValidateQuery::got_mc_handle(td::Result res) { } } +/** + * Callback function called after retrieving the shard state for a previous block. + * + * @param idx The index of the previous block (0 or 1). + * @param res The result of the shard state retrieval. + */ void ValidateQuery::after_get_shard_state(int idx, td::Result> res) { LOG(DEBUG) << "in ValidateQuery::after_get_shard_state(" << idx << ")"; --pending; @@ -643,6 +783,13 @@ void ValidateQuery::after_get_shard_state(int idx, td::Result> r } } +/** + * Processes the retreived masterchain state. + * + * @param mc_state The reference to the masterchain state. + * + * @returns True if the masterchain state is successfully processed, false otherwise. + */ bool ValidateQuery::process_mc_state(Ref mc_state) { if (mc_state.is_null()) { return fatal_error("could not obtain reference masterchain state "s + mc_blkid_.to_str()); @@ -664,6 +811,11 @@ bool ValidateQuery::process_mc_state(Ref mc_state) { return register_mc_state(mc_state_); } +/** + * Tries to unpack the masterchain state and perform necessary checks. + * + * @returns True if the unpacking and checks are successful, false otherwise. + */ bool ValidateQuery::try_unpack_mc_state() { LOG(DEBUG) << "unpacking reference masterchain state"; auto guard = error_ctx_add_guard("unpack last mc state"); @@ -750,7 +902,12 @@ bool ValidateQuery::try_unpack_mc_state() { return true; } -// almost the same as in Collator +/** + * Fetches and validates configuration parameters from the masterchain configuration. + * Almost the same as in Collator. + * + * @returns True if all configuration parameters were successfully fetched and validated, false otherwise. + */ bool ValidateQuery::fetch_config_params() { old_mparams_ = config_->get_config_param(9); { @@ -839,7 +996,16 @@ bool ValidateQuery::fetch_config_params() { return true; } -// almost the same as in Collator +/** + * Checks the previous block against the block registered in the masterchain. + * Almost the same as in Collator. + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * @param chk_chain_len Flag indicating whether to check the chain length. + * + * @returns True if the previous block is valid, false otherwise. 
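+ *
+ * @note A small worked example (hypothetical seqno values, for illustration only):
+ * @code
+ *   // listed: top block of this shard registered in the masterchain, seqno 100
+ *   // prev:   previous block of the candidate being validated,       seqno  99
+ *   check_prev_block(listed, prev, true);   // rejected: the candidate would fork
+ *                                           // off below an already committed block
+ * @endcode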
+ */ bool ValidateQuery::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev, bool chk_chain_len) { if (listed.seqno() > prev.seqno()) { return reject_query(PSTRING() << "cannot generate a shardchain block after previous block " << prev.to_str() @@ -859,7 +1025,15 @@ bool ValidateQuery::check_prev_block(const BlockIdExt& listed, const BlockIdExt& return true; } -// almost the same as in Collator +/** + * Checks the previous block against the block registered in the masterchain. + * Almost the same as in Collator + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * + * @returns True if the previous block is equal to the one registered in the masterchain, false otherwise. + */ bool ValidateQuery::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev) { if (listed != prev) { return reject_query(PSTRING() << "cannot generate shardchain block for shard " << shard_.to_str() @@ -870,8 +1044,12 @@ bool ValidateQuery::check_prev_block_exact(const BlockIdExt& listed, const Block return true; } -// almost the same as in Collator -// (main change: fatal_error -> reject_query) +/** + * Checks the validity of the shard configuration of the current shard. + * Almost the same as in Collator (main change: fatal_error -> reject_query). + * + * @returns True if the shard's configuration is valid, False otherwise. + */ bool ValidateQuery::check_this_shard_mc_info() { wc_info_ = config_->get_workchain_info(workchain()); if (wc_info_.is_null()) { @@ -1016,6 +1194,11 @@ bool ValidateQuery::check_this_shard_mc_info() { * */ +/** + * Computes the previous shard state. + * + * @returns True if the previous state is computed successfully, false otherwise. + */ bool ValidateQuery::compute_prev_state() { CHECK(prev_states.size() == 1u + after_merge_); // Extend validator timeout if previous block is too old @@ -1045,6 +1228,9 @@ bool ValidateQuery::compute_prev_state() { return true; } +/** + * Computes the next shard state using the previous state and the block's Merkle update. + */ bool ValidateQuery::compute_next_state() { LOG(DEBUG) << "computing next state"; auto res = vm::MerkleUpdate::validate(state_update_); @@ -1120,7 +1306,13 @@ bool ValidateQuery::compute_next_state() { return true; } -// similar to Collator::unpack_merge_last_state() +/** + * Unpacks and merges the states of two previous blocks. + * Used if the block is after_merge. + * Similar to Collator::unpack_merge_last_state() + * + * @returns True if the unpacking and merging was successful, false otherwise. + */ bool ValidateQuery::unpack_merge_prev_state() { LOG(DEBUG) << "unpack/merge previous states"; CHECK(prev_states.size() == 2); @@ -1148,7 +1340,13 @@ bool ValidateQuery::unpack_merge_prev_state() { return true; } -// similar to Collator::unpack_last_state() +/** + * Unpacks the state of the previous block. + * Used if the block is not after_merge. + * Similar to Collator::unpack_last_state() + * + * @returns True if the unpacking is successful, false otherwise. + */ bool ValidateQuery::unpack_prev_state() { LOG(DEBUG) << "unpacking previous state(s)"; CHECK(prev_state_root_.not_null()); @@ -1163,7 +1361,16 @@ bool ValidateQuery::unpack_prev_state() { return unpack_one_prev_state(ps_, prev_blocks.at(0), prev_state_root_) && (!after_split_ || split_prev_state(ps_)); } -// similar to Collator::unpack_one_last_state() +/** + * Unpacks the state of a previous block and performs necessary checks. 
+ * Similar to Collator::unpack_one_last_state() + * + * @param ss The ShardState object to unpack the state into. + * @param blkid The BlockIdExt of the previous block. + * @param prev_state_root The root of the state. + * + * @returns True if the unpacking and checks are successful, false otherwise. + */ bool ValidateQuery::unpack_one_prev_state(block::ShardState& ss, BlockIdExt blkid, Ref prev_state_root) { auto res = ss.unpack_state_ext(blkid, std::move(prev_state_root), global_id_, mc_seqno_, after_split_, after_split_ | after_merge_, [this](ton::BlockSeqno mc_seqno) { @@ -1180,7 +1387,15 @@ bool ValidateQuery::unpack_one_prev_state(block::ShardState& ss, BlockIdExt blki return true; } -// similar to Collator::split_last_state() +/** + * Splits the state of previous block. + * Used if the block is after_split. + * Similar to Collator::split_last_state() + * + * @param ss The ShardState object representing the previous state. The result is stored here. + * + * @returns True if the split operation is successful, false otherwise. + */ bool ValidateQuery::split_prev_state(block::ShardState& ss) { LOG(INFO) << "Splitting previous state " << ss.id_.to_str() << " to subshard " << shard_.to_str(); CHECK(after_split_); @@ -1202,6 +1417,11 @@ bool ValidateQuery::split_prev_state(block::ShardState& ss) { return true; } +/** + * Unpacks the next state (obtained by applying the Merkle update) and performs checks. + * + * @returns True if the next state is successfully unpacked and passes all checks, false otherwise. + */ bool ValidateQuery::unpack_next_state() { LOG(DEBUG) << "unpacking new state"; CHECK(state_root_.not_null()); @@ -1231,7 +1451,12 @@ bool ValidateQuery::unpack_next_state() { return true; } -// almost the same as in Collator +/** + * Requests the message queues of neighboring shards. + * Almost the same as in Collator. + * + * @returns True if the request for neighbor message queues was successful, false otherwise. + */ bool ValidateQuery::request_neighbor_queues() { CHECK(new_shard_conf_); auto neighbor_list = new_shard_conf_->get_neighbor_shard_hash_ids(shard_); @@ -1261,7 +1486,13 @@ bool ValidateQuery::request_neighbor_queues() { return true; } -// almost the same as in Collator +/** + * Handles the result of obtaining the outbound queue for a neighbor. + * Almost the same as in Collator. + * + * @param i The index of the neighbor. + * @param res The obtained outbound queue. + */ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> res) { LOG(DEBUG) << "obtained outbound queue for neighbor #" << i; --pending; @@ -1323,7 +1554,14 @@ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> } } -// almost the same as in Collator +/** + * Registers a masterchain state. + * Almost the same as in Collator. + * + * @param other_mc_state The masterchain state to register. + * + * @returns True if the registration is successful, false otherwise. + */ bool ValidateQuery::register_mc_state(Ref other_mc_state) { if (other_mc_state.is_null() || mc_state_.is_null()) { return false; @@ -1349,7 +1587,15 @@ bool ValidateQuery::register_mc_state(Ref other_mc_state) { return true; } -// almost the same as in Collator +/** + * Requests the auxiliary masterchain state. + * Almost the same as in Collator + * + * @param seqno The seqno of the block. + * @param state A reference to the auxiliary masterchain state. + * + * @returns True if the auxiliary masterchain state is successfully requested, false otherwise. 
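+ *
+ * @note Rough flow (a sketch; the implementation below is authoritative):
+ * @code
+ *   state = get_aux_mc_state(seqno);   // reuse a state already cached in aux_mc_states_
+ *   if (state.is_null()) {
+ *     ++pending;                       // otherwise request it from the ValidatorManager;
+ *                                      // the reply arrives in after_get_aux_shard_state()
+ *   }
+ * @endcode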
+ */ bool ValidateQuery::request_aux_mc_state(BlockSeqno seqno, Ref& state) { if (mc_state_.is_null()) { return fatal_error(PSTRING() << "cannot find masterchain block with seqno " << seqno @@ -1383,7 +1629,14 @@ bool ValidateQuery::request_aux_mc_state(BlockSeqno seqno, Ref ValidateQuery::get_aux_mc_state(BlockSeqno seqno) const { auto it = aux_mc_states_.find(seqno); if (it != aux_mc_states_.end()) { @@ -1393,7 +1646,14 @@ Ref ValidateQuery::get_aux_mc_state(BlockSeqno seqno) const { } } -// almost the same as in Collator +/** + * Callback function called after retrieving the auxiliary shard state. + * Handles the retrieved shard state and performs necessary checks and registrations. + * Almost the same as in Collator. + * + * @param blkid The BlockIdExt of the shard state. + * @param res The result of retrieving the shard state. + */ void ValidateQuery::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result> res) { LOG(DEBUG) << "in ValidateQuery::after_get_aux_shard_state(" << blkid.to_str() << ")"; --pending; @@ -1420,6 +1680,17 @@ void ValidateQuery::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result< } // similar to Collator::update_one_shard() +/** + * Checks one shard description in the masterchain shard configuration. + * Used in masterchain validation. + * + * @param info The shard information to be updated. + * @param sibling The sibling shard information. + * @param wc_info The workchain information. + * @param ccvc The Catchain validators configuration. + * + * @returns True if the validation wasa successful, false othewise. + */ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block::McShardHash* sibling, const block::WorkchainInfo* wc_info, const block::CatchainValidatorsConfig& ccvc) { auto shard = info.shard(); @@ -1706,8 +1977,14 @@ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block: return true; } -// checks old_shard_conf_ -> new_shard_conf_ transition using top_shard_descr_dict_ from collated data -// similar to Collator::update_shard_config() +/** + * Checks the shard configuration in the masterchain. + * Used in masterchain collator. + * Checks old_shard_conf_ -> new_shard_conf_ transition using top_shard_descr_dict_ from collated data. + * Similar to Collator::update_shard_config() + * + * @returns True if the shard layout is valid, false otherwise. + */ bool ValidateQuery::check_shard_layout() { prev_now_ = config_->utime; if (prev_now_ > now_) { @@ -1761,7 +2038,14 @@ bool ValidateQuery::check_shard_layout() { return check_mc_validator_info(is_key_block_ || (now_ / ccvc.mc_cc_lifetime > prev_now_ / ccvc.mc_cc_lifetime)); } -// similar to Collator::register_shard_block_creators +/** + * Registers the shard block creators to block_create_count_ + * Similar to Collator::register_shard_block_creators + * + * @param creator_list A vector of Bits256 representing the shard block creators. + * + * @returns True if the registration was successful, False otherwise. + */ bool ValidateQuery::register_shard_block_creators(std::vector creator_list) { for (const auto& x : creator_list) { LOG(DEBUG) << "registering block creator " << x.to_hex(); @@ -1776,7 +2060,12 @@ bool ValidateQuery::register_shard_block_creators(std::vector creat return true; } -// similar to Collator::check_cur_validator_set() +/** + * Checks that the current validator set is entitled to create blocks in this shard and has a correct catchain seqno. 
+ * Similar to Collator::check_cur_validator_set() + * + * @returns True if the current validator set is valid, false otherwise. + */ bool ValidateQuery::check_cur_validator_set() { CatchainSeqno cc_seqno = 0; auto nodes = config_->compute_validator_set_cc(shard_, now_, &cc_seqno); @@ -1799,8 +2088,14 @@ bool ValidateQuery::check_cur_validator_set() { return true; } -// parallel to 4. of Collator::create_mc_state_extra() -// checks validator_info in mc_state_extra +/** + * Checks validator_info in mc_state_extra. + * NB: could be run in parallel to 4. of Collator::create_mc_state_extra() + * + * @param update_mc_cc Flag indicating whether the masterchain catchain seqno should be updated. + * + * @returns True if the validator information is valid, false otherwise. + */ bool ValidateQuery::check_mc_validator_info(bool update_mc_cc) { block::gen::McStateExtra::Record old_state_extra; block::gen::ValidatorInfo::Record old_val_info; @@ -1842,6 +2137,11 @@ bool ValidateQuery::check_mc_validator_info(bool update_mc_cc) { return true; } +/** + * Checks if the Unix time and logical time of the block are valid. + * + * @returns True if the utime and logical time pass checks, False otherwise. + */ bool ValidateQuery::check_utime_lt() { if (start_lt_ <= ps_.lt_) { return reject_query(PSTRING() << "block has start_lt " << start_lt_ << " less than or equal to lt " << ps_.lt_ @@ -1888,8 +2188,16 @@ bool ValidateQuery::check_utime_lt() { * */ -// almost the same as in Collator -// (but it can take into account the new state of the masterchain) +/** + * Adjusts one entry from the processed up to information using the masterchain state that is referenced in the entry. + * Almost the same as in Collator (but it can take into account the new state of the masterchain). + * + * @param proc The MsgProcessedUpto object. + * @param owner The shard that the MsgProcessesUpto information is taken from. + * @param allow_cur Allow using the new state of the msaterchain. + * + * @returns True if the processed up to information was successfully adjusted, false otherwise. + */ bool ValidateQuery::fix_one_processed_upto(block::MsgProcessedUpto& proc, ton::ShardIdFull owner, bool allow_cur) { if (proc.compute_shard_end_lt) { return true; @@ -1912,7 +2220,15 @@ bool ValidateQuery::fix_one_processed_upto(block::MsgProcessedUpto& proc, ton::S return (bool)proc.compute_shard_end_lt; } -// almost the same as in Collator +/** + * Adjusts the processed up to collection using the using the auxilliary masterchain states. + * Almost the same as in Collator. + * + * @param upto The MsgProcessedUptoCollection to be adjusted. + * @param allow_cur Allow using the new state of the msaterchain. + * + * @returns True if all entries were successfully adjusted, False otherwise. + */ bool ValidateQuery::fix_processed_upto(block::MsgProcessedUptoCollection& upto, bool allow_cur) { for (auto& entry : upto.list) { if (!fix_one_processed_upto(entry, upto.owner, allow_cur)) { @@ -1922,6 +2238,11 @@ bool ValidateQuery::fix_processed_upto(block::MsgProcessedUptoCollection& upto, return true; } +/** + * Adjusts the processed_upto values for all shard states, including neighbors. + * + * @returns True if all processed_upto values were successfully adjusted, false otherwise. 
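+ *
+ * @note Simplified outline (a sketch; see the implementation below for the exact
+ * set of collections and the error handling):
+ * @code
+ *   fix_processed_upto(*ps_.processed_upto_);       // previous shard state
+ *   for (auto& descr : neighbors_) {
+ *     fix_processed_upto(*descr.processed_upto);    // every neighbor descriptor
+ *   }
+ * @endcode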
+ */ bool ValidateQuery::fix_all_processed_upto() { CHECK(ps_.processed_upto_); if (!fix_processed_upto(*ps_.processed_upto_)) { @@ -1942,7 +2263,13 @@ bool ValidateQuery::fix_all_processed_upto() { return true; } -// almost the same as in Collator +/** + * Adds trivials neighbor after merging two shards. + * Trivial neighbors are the two previous blocks. + * Almost the same as in Collator. + * + * @returns True if the operation is successful, false otherwise. + */ bool ValidateQuery::add_trivial_neighbor_after_merge() { LOG(DEBUG) << "in add_trivial_neighbor_after_merge()"; CHECK(prev_blocks.size() == 2); @@ -1977,7 +2304,13 @@ bool ValidateQuery::add_trivial_neighbor_after_merge() { return true; } -// almost the same as in Collator +/** + * Adds a trivial neighbor. + * A trivial neighbor is the previous block. + * Almost the same as in Collator. + * + * @returns True if the operation is successful, false otherwise. + */ bool ValidateQuery::add_trivial_neighbor() { LOG(DEBUG) << "in add_trivial_neighbor()"; if (after_merge_) { @@ -2113,6 +2446,11 @@ bool ValidateQuery::add_trivial_neighbor() { return true; } +/** + * Unpacks block data and performs validation checks. + * + * @returns True if the block data is successfully unpacked and passes all validation checks, false otherwise. + */ bool ValidateQuery::unpack_block_data() { LOG(DEBUG) << "unpacking block structures"; block::gen::Block::Record blk; @@ -2152,6 +2490,13 @@ bool ValidateQuery::unpack_block_data() { return unpack_precheck_value_flow(std::move(blk.value_flow)); } +/** + * Validates and unpacks the value flow of a new block. + * + * @param value_flow_root The root of the value flow to be unpacked and validated. + * + * @returns True if the value flow is valid and unpacked successfully, false otherwise. + */ bool ValidateQuery::unpack_precheck_value_flow(Ref value_flow_root) { vm::CellSlice cs{vm::NoVmOrd(), value_flow_root}; if (!(cs.is_valid() && value_flow_.fetch(cs) && cs.empty_ext())) { @@ -2269,7 +2614,14 @@ bool ValidateQuery::unpack_precheck_value_flow(Ref value_flow_root) { return true; } -// similar to Collator::compute_minted_amount() +/** + * Computes the amount of extra currencies to be minted. + * Similar to Collator::compute_minted_amount() + * + * @param to_mint A reference to the CurrencyCollection object to store the minted amount. + * + * @returns True if the computation is successful, false otherwise. + */ bool ValidateQuery::compute_minted_amount(block::CurrencyCollection& to_mint) { if (!is_masterchain()) { return to_mint.set_zero(); @@ -2321,6 +2673,15 @@ bool ValidateQuery::compute_minted_amount(block::CurrencyCollection& to_mint) { return true; } +/** + * Pre-validates the update of an account in a query. + * + * @param acc_id The 256-bit account address. + * @param old_value The old value of the account serialized as ShardAccount. Can be null. + * @param new_value The new value of the account serialized as ShardAccount. Can be null. + * + * @returns True if the accounts passes preliminary checks, false otherwise. 
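+ *
+ * @note Assumed wiring (sketch): this predicate is intended to be driven by a
+ * scan_diff over the old and new account dictionaries, in the same style used
+ * for the other dictionary diffs in this file:
+ * @code
+ *   ps_.account_dict_->scan_diff(
+ *       *ns_.account_dict_,
+ *       [this](td::ConstBitPtr key, int n, Ref<vm::CellSlice> old_val, Ref<vm::CellSlice> new_val) {
+ *         CHECK(n == 256);
+ *         return precheck_one_account_update(key, std::move(old_val), std::move(new_val));
+ *       });  // (augmentation-check mode argument omitted in this sketch)
+ * @endcode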
+ */ bool ValidateQuery::precheck_one_account_update(td::ConstBitPtr acc_id, Ref old_value, Ref new_value) { LOG(DEBUG) << "checking update of account " << acc_id.to_hex(256); @@ -2380,6 +2741,11 @@ bool ValidateQuery::precheck_one_account_update(td::ConstBitPtr acc_id, Ref trans_csr, ton::Bits256& prev_trans_hash, ton::LogicalTime& prev_trans_lt, unsigned& prev_trans_lt_len, @@ -2467,6 +2846,14 @@ bool ValidateQuery::precheck_one_transaction(td::ConstBitPtr acc_id, ton::Logica } // NB: could be run in parallel for different accounts +/** + * Pre-validates an AccountBlock and all transactions in it. + * + * @param acc_id The 256-bit account address. + * @param acc_blk_root The root of the AccountBlock. + * + * @returns True if the AccountBlock passes pre-checks, false otherwise. + */ bool ValidateQuery::precheck_one_account_block(td::ConstBitPtr acc_id, Ref acc_blk_root) { LOG(DEBUG) << "checking AccountBlock for " << acc_id.to_hex(256); if (!acc_id.equals(shard_pfx_.bits(), shard_pfx_len_)) { @@ -2548,6 +2935,11 @@ bool ValidateQuery::precheck_one_account_block(td::ConstBitPtr acc_id, Ref ValidateQuery::lookup_transaction(const ton::StdSmcAddress& addr, ton::LogicalTime lt) const { CHECK(account_blocks_dict_); block::gen::AccountBlock::Record ab_rec; @@ -2578,7 +2978,13 @@ Ref ValidateQuery::lookup_transaction(const ton::StdSmcAddress& addr, return trans_dict.lookup_ref(td::BitArray<64>{(long long)lt}); } -// checks that a ^Transaction refers to a transaction present in the ShardAccountBlocks +/** + * Checks that a Transaction cell refers to a transaction present in the ShardAccountBlocks. + * + * @param trans_ref The reference to the serialized transaction root. + * + * @returns True if the transaction reference is valid, False otherwise. + */ bool ValidateQuery::is_valid_transaction_ref(Ref trans_ref) const { ton::StdSmcAddress addr; ton::LogicalTime lt; @@ -2598,8 +3004,16 @@ bool ValidateQuery::is_valid_transaction_ref(Ref trans_ref) const { return true; } -// checks that any change in OutMsgQueue in the state is accompanied by an OutMsgDescr record in the block -// also checks that the keys are correct +/** + * Checks that any change in OutMsgQueue in the state is accompanied by an OutMsgDescr record in the block. + * Also checks that the keys are correct. + * + * @param out_msg_id The 32+64+256-bit ID of the outbound message. + * @param old_value The old value of the message queue entry. + * @param new_value The new value of the message queue entry. + * + * @returns True if the update is valid, false otherwise. + */ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id, Ref old_value, Ref new_value) { LOG(DEBUG) << "checking update of enqueued outbound message " << out_msg_id.get_int(32) << ":" @@ -2765,6 +3179,11 @@ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id return true; } +/** + * Performs a pre-check on the difference between the old and new outbound message queues. + * + * @returns True if the pre-check is successful, false otherwise. + */ bool ValidateQuery::precheck_message_queue_update() { LOG(INFO) << "pre-checking the difference between the old and the new outbound message queues"; try { @@ -2787,6 +3206,14 @@ bool ValidateQuery::precheck_message_queue_update() { return true; } +/** + * Updates the maximum processed logical time and hash value. + * + * @param lt The logical time to compare against the current maximum processed logical time. 
+ * @param hash The hash value to compare against the current maximum processed hash value. + * + * @returns True if the update was successful, false otherwise. + */ bool ValidateQuery::update_max_processed_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash) { if (proc_lt_ < lt || (proc_lt_ == lt && proc_hash_ < hash)) { proc_lt_ = lt; @@ -2795,6 +3222,14 @@ bool ValidateQuery::update_max_processed_lt_hash(ton::LogicalTime lt, const ton: return true; } +/** + * Updates the minimum enqueued logical time and hash values. + * + * @param lt The logical time to compare. + * @param hash The hash value to compare. + * + * @returns True if the update was successful, false otherwise. + */ bool ValidateQuery::update_min_enqueued_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash) { if (lt < min_enq_lt_ || (lt == min_enq_lt_ && hash < min_enq_hash_)) { min_enq_lt_ = lt; @@ -2803,7 +3238,13 @@ bool ValidateQuery::update_min_enqueued_lt_hash(ton::LogicalTime lt, const ton:: return true; } -// check that the enveloped message (MsgEnvelope) was present in the output queue of a neighbor, and that it has not been processed before +/** + * Checks that the MsgEnvelope was present in the output queue of a neighbor, and that it has not been processed before. + * + * @param msg_env The message envelope of the imported message. + * + * @returns True if the imported internal message passes checks, false otherwise. + */ bool ValidateQuery::check_imported_message(Ref msg_env) { block::tlb::MsgEnvelope::Record_std env; block::gen::CommonMsgInfo::Record_int_msg_info info; @@ -2863,11 +3304,27 @@ bool ValidateQuery::check_imported_message(Ref msg_env) { " has previous address not belonging to any neighbor"); } +/** + * Checks if the given input message is a special message. + * A message is considered special if it recovers fees or mints extra currencies. + * + * @param in_msg The input message to be checked. + * + * @returns True if the input message is special, False otherwise. + */ bool ValidateQuery::is_special_in_msg(const vm::CellSlice& in_msg) const { return (recover_create_msg_.not_null() && vm::load_cell_slice(recover_create_msg_).contents_equal(in_msg)) || (mint_msg_.not_null() && vm::load_cell_slice(mint_msg_).contents_equal(in_msg)); } +/** + * Checks the validity of an inbound message listed in InMsgDescr. + * + * @param key The 256-bit key of the inbound message. + * @param in_msg The inbound message to be checked serialized using InMsg TLB-scheme. + * + * @returns True if the inbound message is valid, false otherwise. + */ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) { LOG(DEBUG) << "checking InMsg with key " << key.to_hex(256); CHECK(in_msg.not_null()); @@ -3274,6 +3731,11 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) return true; } +/** + * Checks the validity of the inbound messages listed in the InMsgDescr dictionary. + * + * @returns True if the inbound messages dictionary is valid, false otherwise. + */ bool ValidateQuery::check_in_msg_descr() { LOG(INFO) << "checking inbound messages listed in InMsgDescr"; try { @@ -3293,6 +3755,14 @@ bool ValidateQuery::check_in_msg_descr() { return true; } +/** + * Checks the validity of an outbound message listed in OutMsgDescr. + * + * @param key The 256-bit key of the outbound message. + * @param in_msg The outbound message to be checked serialized using OutMsg TLB-scheme. + * + * @returns True if the outbound message is valid, false otherwise. 
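+ *
+ * @note Assumed usage (sketch): check_out_msg_descr() walks the block's OutMsgDescr
+ * augmented dictionary and applies this check to every 256-bit key:
+ * @code
+ *   out_msg_dict_->check_for_each_extra(
+ *       [this](Ref<vm::CellSlice> value, Ref<vm::CellSlice> extra, td::ConstBitPtr key, int n) {
+ *         CHECK(n == 256);
+ *         return check_out_msg(key, std::move(value));
+ *       });
+ * @endcode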
+ */ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_msg) { LOG(DEBUG) << "checking OutMsg with key " << key.to_hex(256); CHECK(out_msg.not_null()); @@ -3810,6 +4280,11 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms return true; } +/** + * Checks the validity of the outbound messages listed in the OutMsgDescr dictionary. + * + * @returns True if the outbound messages dictionary is valid, false otherwise. + */ bool ValidateQuery::check_out_msg_descr() { LOG(INFO) << "checking outbound messages listed in OutMsgDescr"; try { @@ -3828,7 +4303,12 @@ bool ValidateQuery::check_out_msg_descr() { return true; } -// compare to Collator::update_processed_upto() +/** + * Checks if the processed up to information is valid and consistent. + * Compare to Collator::update_processed_upto() + * + * @returns True if the processed up to information is valid and consistent, false otherwise. + */ bool ValidateQuery::check_processed_upto() { LOG(INFO) << "checking ProcessedInfo"; CHECK(ps_.processed_upto_); @@ -3884,7 +4364,18 @@ bool ValidateQuery::check_processed_upto() { return true; } -// similar to Collator::process_inbound_message +/** + * Checks the validity of an outbound message in the neighbor's queue. + * Similar to Collator::process_inbound_message. + * + * @param enq_msg The enqueued message to validate. + * @param lt The logical time of the message. + * @param key The 32+64+256-bit key of the message. + * @param nb The neighbor's description. + * @param unprocessed A boolean flag that will be set to true if the message is unprocessed, false otherwise. + * + * @returns True if the message is valid, false otherwise. + */ bool ValidateQuery::check_neighbor_outbound_message(Ref enq_msg, ton::LogicalTime lt, td::ConstBitPtr key, const block::McShardDescr& nb, bool& unprocessed) { @@ -4023,6 +4514,11 @@ bool ValidateQuery::check_neighbor_outbound_message(Ref enq_msg, return true; } +/** + * Checks messages from the outbound queues of the neighbors. + * + * @returns True if the messages are valid, false otherwise. + */ bool ValidateQuery::check_in_queue() { block::OutputQueueMerger nb_out_msgs(shard_, neighbors_); while (!nb_out_msgs.is_eof()) { @@ -4054,9 +4550,12 @@ bool ValidateQuery::check_in_queue() { return true; } -// checks that all messages imported from our outbound queue into neighbor shards have been dequeued -// similar to Collator::out_msg_queue_cleanup() -// (but scans new outbound queue instead of the old) +/** + * Checks that all messages imported from our outbound queue into neighbor shards have been dequeued + * Similar to Collator::out_msg_queue_cleanup() (but scans the new outbound queue instead of the old). + * + * @returns True if the delivery status of all messages has been checked successfully, false otherwise. + */ bool ValidateQuery::check_delivered_dequeued() { LOG(INFO) << "scanning new outbound queue and checking delivery status of all messages"; bool ok = false; @@ -4103,26 +4602,42 @@ bool ValidateQuery::check_delivered_dequeued() { }) || ok; } -// similar to Collator::make_account_from() -std::unique_ptr ValidateQuery::make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra) { +/** + * Creates a new Account object from the given address and serialized account data. + * Creates a new Account if not found. + * Similar to Collator::make_account_from() + * + * @param addr A pointer to the 256-bit address of the account. + * @param account A cell slice with an account serialized using ShardAccount TLB-scheme. 
+ * + * @returns A unique pointer to the created Account object, or nullptr if the creation failed. + */ +std::unique_ptr ValidateQuery::make_account_from(td::ConstBitPtr addr, Ref account) { auto ptr = std::make_unique(workchain(), addr); if (account.is_null()) { if (!ptr->init_new(now_)) { return nullptr; } - } else if (!ptr->unpack(std::move(account), std::move(extra), now_, - is_masterchain() && config_->is_special_smartcontract(addr))) { + } else if (!ptr->unpack(std::move(account), now_, is_masterchain() && config_->is_special_smartcontract(addr))) { return nullptr; } ptr->block_lt = start_lt_; return ptr; } -// similar to Collator::make_account() +/** + * Retreives an Account object from the data in the shard state. + * Accounts are cached in the ValidatorQuery's map. + * Similar to Collator::make_account() + * + * @param addr The 256-bit address of the account. + * + * @returns Pointer to the account if found or created successfully. + * Returns nullptr if an error occured. + */ std::unique_ptr ValidateQuery::unpack_account(td::ConstBitPtr addr) { auto dict_entry = ps_.account_dict_->lookup_extra(addr, 256); - auto new_acc = make_account_from(addr, std::move(dict_entry.first), std::move(dict_entry.second)); + auto new_acc = make_account_from(addr, std::move(dict_entry.first)); if (!new_acc) { reject_query("cannot load state of account "s + addr.to_hex(256) + " from previous shardchain state"); return {}; @@ -4135,6 +4650,18 @@ std::unique_ptr ValidateQuery::unpack_account(td::ConstBitPtr ad return new_acc; } +/** + * Checks the validity of a single transaction for a given account. + * Performs transaction execution. + * + * @param account The account of the transaction. + * @param lt The logical time of the transaction. + * @param trans_root The root of the transaction. + * @param is_first Flag indicating if this is the first transaction of the account. + * @param is_last Flag indicating if this is the last transaction of the account. + * + * @returns True if the transaction is valid, false otherwise. + */ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalTime lt, Ref trans_root, bool is_first, bool is_last) { if (!check_timeout()) { @@ -4598,7 +5125,15 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT return true; } -// NB: may be run in parallel for different accounts +/** + * Checks the validity of transactions for a given account block. + * NB: may be run in parallel for different accounts + * + * @param acc_addr The address of the account. + * @param acc_blk_root The root of the AccountBlock. + * + * @returns True if the account transactions are valid, false otherwise. + */ bool ValidateQuery::check_account_transactions(const StdSmcAddress& acc_addr, Ref acc_blk_root) { block::gen::AccountBlock::Record acc_blk; CHECK(tlb::csr_unpack(std::move(acc_blk_root), acc_blk) && acc_blk.account_addr == acc_addr); @@ -4630,6 +5165,11 @@ bool ValidateQuery::check_account_transactions(const StdSmcAddress& acc_addr, Re } } +/** + * Checks all transactions in the account blocks. + * + * @returns True if all transactions pass the check, False otherwise. + */ bool ValidateQuery::check_transactions() { LOG(INFO) << "checking all transactions"; return account_blocks_dict_->check_for_each_extra( @@ -4639,7 +5179,17 @@ bool ValidateQuery::check_transactions() { }); } -// similar to Collator::update_account_public_libraries() +/** + * Processes changes in libraries of an account. + * Used in masterchain validation. 
+ * Similar to Collator::update_account_public_libraries() + * + * @param orig_libs The original libraries of the account. + * @param final_libs The final libraries of the account. + * @param addr The address of the account. + * + * @returns True if the update was successful, false otherwise. + */ bool ValidateQuery::scan_account_libraries(Ref orig_libs, Ref final_libs, const td::Bits256& addr) { vm::Dictionary dict1{std::move(orig_libs), 256}, dict2{std::move(final_libs), 256}; return dict1.scan_diff( @@ -4657,6 +5207,12 @@ bool ValidateQuery::scan_account_libraries(Ref orig_libs, Ref in_msg_root, const block::CurrencyCollection& amount, Ref addr_cell) { if (in_msg_root.is_null()) { @@ -4786,11 +5357,28 @@ bool ValidateQuery::check_special_message(Ref in_msg_root, const block return true; } +/** + * Checks if all necessary special messages are valid and exist in the incoming messages dictionary. + * Used in masterchain validation. + * + * @returns True if special messages are valid, false otherwise. + */ bool ValidateQuery::check_special_messages() { return check_special_message(recover_create_msg_, value_flow_.recovered, config_->get_config_param(3, 1)) && check_special_message(mint_msg_, value_flow_.minted, config_->get_config_param(2, 0)); } +/** + * Checks if an update of LibDescr of as single library update is valid. + * Compares updates in LibDescr against updates of account states. + * Used in masterchain validation. + * + * @param key The 256-bit key of the library. + * @param old_value The old value of the LibDescr + * @param new_value The new value of the LibDescr. + * + * @returns True if the library update is valid, false otherwise. + */ bool ValidateQuery::check_one_library_update(td::ConstBitPtr key, Ref old_value, Ref new_value) { // shared_lib_descr$00 lib:^Cell publishers:(Hashmap 256 True) = LibDescr; @@ -4841,6 +5429,12 @@ bool ValidateQuery::check_one_library_update(td::ConstBitPtr key, Refscan_diff( @@ -4861,6 +5455,11 @@ bool ValidateQuery::check_shard_libraries() { return true; } +/** + * Checks the validity of the new shard state. + * + * @returns True if the new state is valid, false otherwise. + */ bool ValidateQuery::check_new_state() { LOG(INFO) << "checking header of the new shardchain state"; block::gen::ShardStateUnsplit::Record info; @@ -4955,6 +5554,15 @@ bool ValidateQuery::check_new_state() { return true; } +/** + * Checks if a masterchain configuration update is valid. + * Used in masterchain validation. + * + * @param old_conf_params The old configuration parameters. + * @param new_conf_params The new configuration parameters. + * + * @returns True if the update is valid, false otherwise. + */ bool ValidateQuery::check_config_update(Ref old_conf_params, Ref new_conf_params) { if (!block::gen::t_ConfigParams.validate_csr(10000, new_conf_params)) { return reject_query("new configuration failed to pass automated validity checks"); @@ -5059,6 +5667,16 @@ bool ValidateQuery::check_config_update(Ref old_conf_params, Ref< "reason (the suggested configuration appears to be valid)"); } +/** + * Checks if a single entry in the dictionary of previous masterchain blocks is valid and consistent. + * Used in masterchain validation. + * + * @param seqno The sequence number of the entry. + * @param old_val_extra The old value of the entry. + * @param new_val_extra The new value of the entry. + * + * @returns True if the update is valid and consistent, false otherwise. 
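+ *
+ * @note Assumed wiring (sketch): the diff of the old and new previous-blocks
+ * dictionaries, keyed by a 32-bit masterchain seqno, is fed into this check:
+ * @code
+ *   old_prev_dict.scan_diff(
+ *       new_prev_dict,
+ *       [this](td::ConstBitPtr key, int n, Ref<vm::CellSlice> old_val, Ref<vm::CellSlice> new_val) {
+ *         CHECK(n == 32);
+ *         return check_one_prev_dict_update((BlockSeqno)key.get_uint(32), std::move(old_val), std::move(new_val));
+ *       });  // (augmentation-check mode argument omitted in this sketch)
+ * @endcode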
+ */ bool ValidateQuery::check_one_prev_dict_update(ton::BlockSeqno seqno, Ref old_val_extra, Ref new_val_extra) { if (old_val_extra.not_null() && new_val_extra.is_null()) { @@ -5113,7 +5731,13 @@ bool ValidateQuery::check_one_prev_dict_update(ton::BlockSeqno seqno, Ref old_val, Ref new_val) { LOG(DEBUG) << "checking update of CreatorStats for "s + key.to_hex(256); @@ -5365,7 +6009,13 @@ bool ValidateQuery::check_one_block_creator_update(td::ConstBitPtr key, Refget_shard_hash(shard); @@ -5433,6 +6093,12 @@ bool ValidateQuery::check_one_shard_fee(ShardIdFull shard, const block::Currency return true; } +/** + * Checks the validity of the McBlockExtra in a masterchain block. + * Used in masterchain validation. + * + * @returns True if the data is valid, false otherwise. + */ bool ValidateQuery::check_mc_block_extra() { if (!is_masterchain()) { return true; @@ -5484,6 +6150,11 @@ bool ValidateQuery::check_mc_block_extra() { return true; } +/** + * Validates the value flow of a block. + * + * @returns True if the value flow is valid, False otherwise. + */ bool ValidateQuery::postcheck_value_flow() { auto expected_fees = value_flow_.fees_imported + value_flow_.created + transaction_fees_ + import_fees_ - fees_burned_; @@ -5504,13 +6175,11 @@ bool ValidateQuery::postcheck_value_flow() { return true; } -/* - * - * MAIN VALIDATOR FUNCTION - * (invokes other methods in a suitable order) +/** + * MAIN VALIDATOR FUNCTION (invokes other methods in a suitable order). * + * @returns True if the validation is successful, False otherwise. */ - bool ValidateQuery::try_validate() { if (pending) { return true; @@ -5616,6 +6285,11 @@ bool ValidateQuery::try_validate() { return save_candidate(); } +/** + * Saves the candidate to disk. + * + * @returns True. + */ bool ValidateQuery::save_candidate() { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result R) { if (R.is_error()) { @@ -5629,6 +6303,10 @@ bool ValidateQuery::save_candidate() { return true; } +/** + * Callback function called after saving block candidate. + * Finishes validation. 
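+ *
+ * @note End of the pipeline: try_validate() finishes by calling save_candidate();
+ * once the candidate has been stored, this callback runs and calls finish_query(),
+ * which fulfills main_promise with now_.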
+ */ void ValidateQuery::written_candidate() { finish_query(); } diff --git a/validator/impl/validate-query.hpp b/validator/impl/validate-query.hpp index 7d15dcfe..ff8cc83c 100644 --- a/validator/impl/validate-query.hpp +++ b/validator/impl/validate-query.hpp @@ -342,8 +342,7 @@ class ValidateQuery : public td::actor::Actor { const block::McShardDescr& src_nb, bool& unprocessed); bool check_in_queue(); bool check_delivered_dequeued(); - std::unique_ptr make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra); + std::unique_ptr make_account_from(td::ConstBitPtr addr, Ref account); std::unique_ptr unpack_account(td::ConstBitPtr addr); bool check_one_transaction(block::Account& account, LogicalTime lt, Ref trans_root, bool is_first, bool is_last); From 030ebaf772bf95807ccc07520018f9477c241414 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Fri, 3 Nov 2023 15:25:58 +0300 Subject: [PATCH 26/71] Change GASCONSUMED opcode to f807 --- crypto/fift/lib/Asm.fif | 2 +- crypto/vm/tonops.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/crypto/fift/lib/Asm.fif b/crypto/fift/lib/Asm.fif index a0bfe642..0a4c7074 100644 --- a/crypto/fift/lib/Asm.fif +++ b/crypto/fift/lib/Asm.fif @@ -1275,7 +1275,7 @@ x{F4BF} @Defop DICTUGETEXECZ x{F800} @Defop ACCEPT x{F801} @Defop SETGASLIMIT -x{F806} @Defop GASCONSUMED +x{F807} @Defop GASCONSUMED x{F80F} @Defop COMMIT x{F810} @Defop RANDU256 diff --git a/crypto/vm/tonops.cpp b/crypto/vm/tonops.cpp index f491d252..d150f30b 100644 --- a/crypto/vm/tonops.cpp +++ b/crypto/vm/tonops.cpp @@ -101,7 +101,7 @@ void register_basic_gas_ops(OpcodeTable& cp0) { using namespace std::placeholders; cp0.insert(OpcodeInstr::mksimple(0xf800, 16, "ACCEPT", exec_accept)) .insert(OpcodeInstr::mksimple(0xf801, 16, "SETGASLIMIT", exec_set_gas_limit)) - .insert(OpcodeInstr::mksimple(0xf806, 16, "GASCONSUMED", exec_gas_consumed)->require_version(4)) + .insert(OpcodeInstr::mksimple(0xf807, 16, "GASCONSUMED", exec_gas_consumed)->require_version(4)) .insert(OpcodeInstr::mksimple(0xf80f, 16, "COMMIT", exec_commit)); } From 79ed14cba950f267aa37d532c80e674a36a739a8 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 3 Nov 2023 15:53:11 +0300 Subject: [PATCH 27/71] Add information on running tests (#796) Co-authored-by: SpyCheese --- README.md | 2 +- doc/Tests.md | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 doc/Tests.md diff --git a/README.md b/README.md index 88e02711..7e78bb04 100644 --- a/README.md +++ b/README.md @@ -70,4 +70,4 @@ If a CI workflow fails not because of your changes but workflow issues, try to f ## Running tests -Tests are executed by running `ctest` in the build directory. \ No newline at end of file +Tests are executed by running `ctest` in the build directory. See `doc/Tests.md` for more information. \ No newline at end of file diff --git a/doc/Tests.md b/doc/Tests.md new file mode 100644 index 00000000..c883731a --- /dev/null +++ b/doc/Tests.md @@ -0,0 +1,24 @@ +# Tests execution +TON contains multiple unit-tests, that facilitate detection of erroneous blockchain behaviour on each commit. 
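+Individual tests can also be run selectively with standard CTest name filtering (for example `ctest -R test-cells`), as shown in the Run tests section below.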
+## Build tests +Go inside the build directory and, if you use ninja, build the tests using the following command: + +```ninja test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state``` + +For more details on how to build TON artifacts, please refer to any of Github actions. + +For cmake use: + +```cmake --build . --target test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state``` + +## Run tests +Go inside the build directory and with ninja execute: + +```ninja test``` + +with ctest: + +```ctest``` + +## Integration of tests into CI +Most relevant GitHub actions include the step ```Run tests``` that executes the tests. If any of tests fails, the action will be interrupted and no artifacts will be provided. \ No newline at end of file From ba03657617a015166dc1e74d15549db8846a53fe Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 3 Nov 2023 17:05:43 +0300 Subject: [PATCH 28/71] Fix transaction credit phase (#797) Co-authored-by: SpyCheese --- crypto/block/transaction.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 9df5be13..dba3ce90 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -955,10 +955,14 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc */ bool Transaction::prepare_credit_phase() { credit_phase = std::make_unique(); + // Due payment is only collected in storage phase. 
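+  // (i.e. due_payment is no longer deducted from the value credited here; it is charged in prepare_storage_phase instead)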
+ // For messages with bounce flag, contract always receives the amount specified in message // auto collected = std::min(msg_balance_remaining.grams, due_payment); // credit_phase->due_fees_collected = collected; // due_payment -= collected; // credit_phase->credit = msg_balance_remaining -= collected; + credit_phase->due_fees_collected = td::zero_refint(); + credit_phase->credit = msg_balance_remaining; if (!msg_balance_remaining.is_valid()) { LOG(ERROR) << "cannot compute the amount to be credited in the credit phase of transaction"; return false; From 909e7dbdfc48a3c3e0774b422b99ff4b3d375504 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Wed, 22 Nov 2023 10:25:53 +0300 Subject: [PATCH 29/71] Allow anycast destination address in masterchain (#807) --- crypto/block/transaction.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index dba3ce90..49c853a9 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1976,11 +1976,6 @@ bool Transaction::check_rewrite_dest_addr(Ref& dest_addr, const A } if (rec.anycast->size() > 1) { // destination address is an anycast - if (rec.workchain_id == ton::masterchainId) { - // anycast addresses disabled in masterchain - LOG(DEBUG) << "masterchain destination address has an anycast field"; - return false; - } vm::CellSlice cs{*rec.anycast}; int d = (int)cs.fetch_ulong(6) - 32; if (d <= 0 || d > 30) { From 31263fb4754025c3c5db1504c825e96ecf010653 Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Wed, 22 Nov 2023 10:27:39 +0300 Subject: [PATCH 30/71] Limit max number of public libraries on contracts (#808) --- crypto/block/block.cpp | 2 +- crypto/block/block.h | 2 ++ crypto/block/block.tlb | 3 ++- crypto/block/mc-config.cpp | 1 + crypto/block/mc-config.h | 1 + crypto/block/transaction.cpp | 42 ++++++++++++++++++++++++++++++------ validator/impl/collator.cpp | 2 +- 7 files changed, 44 insertions(+), 9 deletions(-) diff --git a/crypto/block/block.cpp b/crypto/block/block.cpp index 1131213c..9a3dba60 100644 --- a/crypto/block/block.cpp +++ b/crypto/block/block.cpp @@ -715,7 +715,7 @@ td::uint64 BlockLimitStatus::estimate_block_size(const vm::NewCellStorageStat::S sum += *extra; } return 2000 + (sum.bits >> 3) + sum.cells * 12 + sum.internal_refs * 3 + sum.external_refs * 40 + accounts * 200 + - transactions * 200 + (extra ? 200 : 0) + extra_out_msgs * 300; + transactions * 200 + (extra ? 
200 : 0) + extra_out_msgs * 300 + extra_library_diff * 700; } int BlockLimitStatus::classify() const { diff --git a/crypto/block/block.h b/crypto/block/block.h index 19d99e6a..09169429 100644 --- a/crypto/block/block.h +++ b/crypto/block/block.h @@ -262,6 +262,7 @@ struct BlockLimitStatus { td::uint64 gas_used{}; vm::NewCellStorageStat st_stat; unsigned accounts{}, transactions{}, extra_out_msgs{}; + unsigned extra_library_diff{}; // Number of public libraries in deleted/frozen accounts BlockLimitStatus(const BlockLimits& limits_, ton::LogicalTime lt = 0) : limits(limits_), cur_lt(std::max(limits_.start_lt, lt)) { } @@ -271,6 +272,7 @@ struct BlockLimitStatus { transactions = accounts = 0; gas_used = 0; extra_out_msgs = 0; + extra_library_diff = 0; } td::uint64 estimate_block_size(const vm::NewCellStorageStat::Stat* extra = nullptr) const; int classify() const; diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index 1b6f72a3..4b36f13b 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -780,7 +780,8 @@ _ MisbehaviourPunishmentConfig = ConfigParam 40; size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig; size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 - max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 = SizeLimitsConfig; + max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 + max_acc_public_libraries:uint32 = SizeLimitsConfig; _ SizeLimitsConfig = ConfigParam 43; // key is [ wc:int32 addr:uint256 ] diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 02c6e45b..08be5c88 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -1934,6 +1934,7 @@ td::Result Config::get_size_limits_config() const { unpack_v1(rec); limits.max_acc_state_bits = rec.max_acc_state_bits; limits.max_acc_state_cells = rec.max_acc_state_cells; + limits.max_acc_public_libraries = rec.max_acc_public_libraries; }; gen::SizeLimitsConfig::Record_size_limits_config rec_v1; gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2; diff --git a/crypto/block/mc-config.h b/crypto/block/mc-config.h index b29e3792..caab93f3 100644 --- a/crypto/block/mc-config.h +++ b/crypto/block/mc-config.h @@ -389,6 +389,7 @@ struct SizeLimitsConfig { ExtMsgLimits ext_msg_limits; td::uint32 max_acc_state_cells = 1 << 16; td::uint32 max_acc_state_bits = (1 << 16) * 1023; + td::uint32 max_acc_public_libraries = 256; }; struct CatchainValidatorsConfig { diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 49c853a9..05df6371 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -2500,8 +2500,28 @@ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap, return 0; } +/** + * Calculates the number of public libraries in the dictionary. + * + * @param libraries The dictionary of account libraries. + * + * @returns The number of public libraries in the dictionary. 
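+ *
+ * @note Usage sketch (simplified; this mirrors how the new masterchain limit is
+ * enforced in check_state_limits() below):
+ * @code
+ *   if (get_public_libraries_count(new_library) > size_limits.max_acc_public_libraries) {
+ *     return td::Status::Error("too many public libraries");
+ *   }
+ * @endcode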
+ */ +static td::uint32 get_public_libraries_count(const td::Ref& libraries) { + td::uint32 count = 0; + vm::Dictionary dict{libraries, 256}; + dict.check_for_each([&](td::Ref value, td::ConstBitPtr key, int) { + if (block::is_public_library(key, std::move(value))) { + ++count; + } + return true; + }); + return count; +} + /** * Checks that the new account state fits in the limits. + * This function is not called for special accounts. * * @param cfg The configuration for the action phase. * @@ -2547,10 +2567,15 @@ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { } else { new_storage_stat.clear(); } - return new_storage_stat.cells <= cfg.size_limits.max_acc_state_cells && - new_storage_stat.bits <= cfg.size_limits.max_acc_state_bits - ? td::Status::OK() - : td::Status::Error("state too big"); + if (new_storage_stat.cells > cfg.size_limits.max_acc_state_cells || + new_storage_stat.bits > cfg.size_limits.max_acc_state_bits) { + return td::Status::Error("account state is too big"); + } + if (account.is_masterchain() && !cell_equal(account.library, new_library) && + get_public_libraries_count(new_library) > cfg.size_limits.max_acc_public_libraries) { + return td::Status::Error("too many public libraries"); + } + return td::Status::OK(); } /** @@ -3135,8 +3160,13 @@ bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) return false; } if (with_size) { - return blimst.add_proof(new_total_state) && blimst.add_cell(root) && blimst.add_transaction() && - blimst.add_account(is_first); + if (!(blimst.add_proof(new_total_state) && blimst.add_cell(root) && blimst.add_transaction() && + blimst.add_account(is_first))) { + return false; + } + if (account.is_masterchain() && (was_frozen || was_deleted)) { + blimst.extra_library_diff += get_public_libraries_count(account.orig_library); + } } return true; } diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index d133278f..b72292bb 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -4310,7 +4310,7 @@ bool Collator::update_public_libraries() { } } } - if (libraries_changed_ && verbosity >= 2 * 0) { + if (libraries_changed_ && verbosity >= 2) { std::cerr << "New public libraries: "; block::gen::t_HashmapE_256_LibDescr.print(std::cerr, shard_libraries_->get_root()); shard_libraries_->get_root()->print_rec(std::cerr); From 1cffca0b407dfd0e47a2f95e8e1f757f6c4f177c Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Wed, 22 Nov 2023 10:27:58 +0300 Subject: [PATCH 31/71] Fix due payment reimbursement (#809) --- crypto/block/transaction.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 05df6371..23de6e29 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -885,7 +885,7 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc if (now < account.last_paid) { return false; } - auto to_pay = account.compute_storage_fees(now, *(cfg.pricing)); + auto to_pay = account.compute_storage_fees(now, *(cfg.pricing)) + due_payment; if (to_pay.not_null() && sgn(to_pay) < 0) { return false; } @@ -898,7 +898,7 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc res->fees_collected = to_pay; res->fees_due = td::zero_refint(); balance -= std::move(to_pay); - } else if (acc_status == Account::acc_frozen && !force_collect && to_pay + due_payment < cfg.delete_due_limit) { + } else if (acc_status == Account::acc_frozen && 
!force_collect && to_pay < cfg.delete_due_limit) { // do not collect fee res->last_paid_updated = (res->is_special ? 0 : account.last_paid); res->fees_collected = res->fees_due = td::zero_refint(); @@ -907,7 +907,7 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc res->fees_due = std::move(to_pay) - std::move(balance.grams); balance.grams = td::zero_refint(); if (!res->is_special) { - auto total_due = res->fees_due + due_payment; + auto total_due = res->fees_due; switch (acc_status) { case Account::acc_uninit: case Account::acc_frozen: From 6b8994e45645b073139e2bf49fec49ae9fb67c5d Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 22 Nov 2023 15:41:02 +0300 Subject: [PATCH 32/71] Return zero instead of null in compute_storage_fees (#810) Co-authored-by: SpyCheese --- crypto/block/transaction.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 23de6e29..33f1c778 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -652,7 +652,7 @@ td::RefInt256 StoragePrices::compute_storage_fees(ton::UnixTime now, const std:: const vm::CellStorageStat& storage_stat, ton::UnixTime last_paid, bool is_special, bool is_masterchain) { if (now <= last_paid || !last_paid || is_special || pricing.empty() || now <= pricing[0].valid_since) { - return {}; + return td::zero_refint(); } std::size_t n = pricing.size(), i = n; while (i && pricing[i - 1].valid_since > last_paid) { From d9580eab1bd1499ce90d801b19f164b4e85653bc Mon Sep 17 00:00:00 2001 From: "aleksej.paschenko" Date: Thu, 23 Nov 2023 14:01:45 +0300 Subject: [PATCH 33/71] Increase emulator capability to (de)serialize data (#811) --- emulator/emulator-extern.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/emulator/emulator-extern.cpp b/emulator/emulator-extern.cpp index 4b65dd06..9f06964e 100644 --- a/emulator/emulator-extern.cpp +++ b/emulator/emulator-extern.cpp @@ -503,7 +503,7 @@ const char *tvm_emulator_run_get_method(void *tvm_emulator, int method_id, const auto emulator = static_cast(tvm_emulator); auto result = emulator->run_get_method(method_id, stack); - vm::FakeVmStateLimits fstate(1000); // limit recursive (de)serialization calls + vm::FakeVmStateLimits fstate(3500); // limit recursive (de)serialization calls vm::VmStateInterface::Guard guard(&fstate); vm::CellBuilder stack_cb; From 7262a66d210b843502353d6aa79406faa5eb9ccf Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Thu, 23 Nov 2023 18:17:44 +0300 Subject: [PATCH 34/71] Don't allow deploying a contract with public libs (#812) * Check account size limits in unpack_msg_state * Don't allow deploying a contract with public libs --------- Co-authored-by: SpyCheese --- crypto/block/transaction.cpp | 62 +++++++++++++++++++++---------- crypto/block/transaction.h | 5 ++- validator/impl/validate-query.cpp | 1 + 3 files changed, 47 insertions(+), 21 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 33f1c778..62a48cab 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1290,11 +1290,13 @@ int output_actions_count(Ref list) { /** * Unpacks the message StateInit. * + * @param cfg The configuration for the compute phase. * @param lib_only If true, only unpack libraries from the state. + * @param forbid_public_libs Don't allow public libraries in initstate. * * @returns True if the unpacking is successful, false otherwise. 
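+ *
+ * @note If the StateInit from the message does not fit into the configured size
+ * limits, the previously loaded code/data/library are restored and false is
+ * returned, so a failed init cannot leave the account in a half-updated state.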
*/ -bool Transaction::unpack_msg_state(bool lib_only) { +bool Transaction::unpack_msg_state(const ComputePhaseConfig& cfg, bool lib_only, bool forbid_public_libs) { block::gen::StateInit::Record state; if (in_msg_state.is_null() || !tlb::unpack_cell(in_msg_state, state)) { LOG(ERROR) << "cannot unpack StateInit from an inbound message"; @@ -1318,9 +1320,22 @@ bool Transaction::unpack_msg_state(bool lib_only) { new_tock = z & 1; LOG(DEBUG) << "tick=" << new_tick << ", tock=" << new_tock; } + td::Ref old_code = new_code, old_data = new_data, old_library = new_library; new_code = state.code->prefetch_ref(); new_data = state.data->prefetch_ref(); new_library = state.library->prefetch_ref(); + auto size_limits = cfg.size_limits; + if (forbid_public_libs) { + size_limits.max_acc_public_libraries = 0; + } + auto S = check_state_limits(size_limits, false); + if (S.is_error()) { + LOG(DEBUG) << "Cannot unpack msg state: " << S.move_as_error(); + new_code = old_code; + new_data = old_data; + new_library = old_library; + return false; + } return true; } @@ -1408,7 +1423,9 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { return true; } use_msg_state = true; - if (!(unpack_msg_state() && account.check_split_depth(new_split_depth))) { + bool forbid_public_libs = + acc_status == Account::acc_uninit && account.is_masterchain(); // Forbid for deploying, allow for unfreezing + if (!(unpack_msg_state(cfg, false, forbid_public_libs) && account.check_split_depth(new_split_depth))) { LOG(DEBUG) << "cannot unpack in_msg_state, or it has bad split_depth; cannot init account state"; cp.skip_reason = ComputePhase::sk_bad_state; return true; @@ -1423,7 +1440,7 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { cp.skip_reason = in_msg_state.not_null() ? ComputePhase::sk_bad_state : ComputePhase::sk_no_state; return true; } else if (in_msg_state.not_null()) { - unpack_msg_state(true); // use only libraries + unpack_msg_state(cfg, true); // use only libraries } if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) { LOG(DEBUG) << "in_msg_state hash mismatch in external message"; @@ -1560,7 +1577,7 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { if (account.is_special) { return true; } - auto S = check_state_limits(cfg); + auto S = check_state_limits(cfg.size_limits); if (S.is_error()) { // Rollback changes to state, fail action phase LOG(INFO) << "Account state size exceeded limits: " << S.move_as_error(); @@ -2523,13 +2540,14 @@ static td::uint32 get_public_libraries_count(const td::Ref& libraries) * Checks that the new account state fits in the limits. * This function is not called for special accounts. * - * @param cfg The configuration for the action phase. + * @param size_limits The size limits configuration. + * @param update_storage_stat Store storage stat in the Transaction's CellStorageStat. * * @returns A `td::Status` indicating the result of the check. * - If the state limits are within the allowed range, returns OK. * - If the state limits exceed the maximum allowed range, returns an error. 
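 *
 * Call-site sketch (both forms already appear elsewhere in this patch; shown here only to
 * illustrate the new signature):
 *
 *   auto S1 = check_state_limits(cfg.size_limits);     // action phase: storage stats are kept for compute_state()
 *   auto S2 = check_state_limits(size_limits, false);  // unpack_msg_state(): check only, nothing is cached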
*/ -td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { +td::Status Transaction::check_state_limits(const SizeLimitsConfig& size_limits, bool update_storage_stat) { auto cell_equal = [](const td::Ref& a, const td::Ref& b) -> bool { if (a.is_null()) { return b.is_null(); @@ -2543,13 +2561,13 @@ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { cell_equal(account.library, new_library)) { return td::Status::OK(); } - // new_storage_stat is used here beause these stats will be reused in compute_state() - new_storage_stat.limit_cells = cfg.size_limits.max_acc_state_cells; - new_storage_stat.limit_bits = cfg.size_limits.max_acc_state_bits; + vm::CellStorageStat storage_stat; + storage_stat.limit_cells = size_limits.max_acc_state_cells; + storage_stat.limit_bits = size_limits.max_acc_state_bits; td::Timer timer; auto add_used_storage = [&](const td::Ref& cell) -> td::Status { if (cell.not_null()) { - TRY_RESULT(res, new_storage_stat.add_used_storage(cell)); + TRY_RESULT(res, storage_stat.add_used_storage(cell)); if (res.max_merkle_depth > max_allowed_merkle_depth) { return td::Status::Error("too big merkle depth"); } @@ -2563,19 +2581,24 @@ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { LOG(INFO) << "Compute used storage took " << timer.elapsed() << "s"; } if (acc_status == Account::acc_active) { - new_storage_stat.clear_limit(); + storage_stat.clear_limit(); } else { - new_storage_stat.clear(); + storage_stat.clear(); } - if (new_storage_stat.cells > cfg.size_limits.max_acc_state_cells || - new_storage_stat.bits > cfg.size_limits.max_acc_state_bits) { - return td::Status::Error("account state is too big"); + td::Status res; + if (storage_stat.cells > size_limits.max_acc_state_cells || storage_stat.bits > size_limits.max_acc_state_bits) { + res = td::Status::Error(PSTRING() << "account state is too big"); + } else if (account.is_masterchain() && !cell_equal(account.library, new_library) && + get_public_libraries_count(new_library) > size_limits.max_acc_public_libraries) { + res = td::Status::Error("too many public libraries"); + } else { + res = td::Status::OK(); } - if (account.is_masterchain() && !cell_equal(account.library, new_library) && - get_public_libraries_count(new_library) > cfg.size_limits.max_acc_public_libraries) { - return td::Status::Error("too many public libraries"); + if (update_storage_stat) { + // storage_stat will be reused in compute_state() + new_storage_stat = std::move(storage_stat); } - return td::Status::OK(); + return res; } /** @@ -3407,6 +3430,7 @@ td::Status FetchConfigParams::fetch_config_params( compute_phase_cfg->prev_blocks_info = std::move(prev_blocks_info); } compute_phase_cfg->suspended_addresses = config.get_suspended_addresses(now); + compute_phase_cfg->size_limits = size_limits; } { // compute action_phase_cfg diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 6bb47fd9..d7cb95d1 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -116,6 +116,7 @@ struct ComputePhaseConfig { int global_version = 0; Ref prev_blocks_info; std::unique_ptr suspended_addresses; + SizeLimitsConfig size_limits; int vm_log_verbosity = 0; ComputePhaseConfig(td::uint64 _gas_price = 0, td::uint64 _gas_limit = 0, td::uint64 _gas_credit = 0) @@ -372,7 +373,7 @@ struct Transaction { std::vector> compute_vm_libraries(const ComputePhaseConfig& cfg); bool prepare_compute_phase(const ComputePhaseConfig& cfg); bool prepare_action_phase(const ActionPhaseConfig& cfg); - 
td::Status check_state_limits(const ActionPhaseConfig& cfg); + td::Status check_state_limits(const SizeLimitsConfig& size_limits, bool update_storage_stat = true); bool prepare_bounce_phase(const ActionPhaseConfig& cfg); bool compute_state(); bool serialize(); @@ -404,7 +405,7 @@ struct Transaction { bool serialize_compute_phase(vm::CellBuilder& cb); bool serialize_action_phase(vm::CellBuilder& cb); bool serialize_bounce_phase(vm::CellBuilder& cb); - bool unpack_msg_state(bool lib_only = false); + bool unpack_msg_state(const ComputePhaseConfig& cfg, bool lib_only = false, bool forbid_public_libs = false); }; } // namespace transaction diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index b134e1b3..d9d03207 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -954,6 +954,7 @@ bool ValidateQuery::fetch_config_params() { compute_phase_cfg_.prev_blocks_info = prev_blocks_info.move_as_ok(); } compute_phase_cfg_.suspended_addresses = config_->get_suspended_addresses(now_); + compute_phase_cfg_.size_limits = size_limits; } { // compute action_phase_cfg From 51d5113395fe0f765592c844ced5ef1ac4d99f4c Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 27 Nov 2023 11:30:46 +0300 Subject: [PATCH 35/71] Add 2023.11 Update changelogs --- Changelog.md | 11 +++++++++++ recent_changelog.md | 13 +++++++++---- 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/Changelog.md b/Changelog.md index fba3c5c5..440866c2 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,3 +1,14 @@ +##2023.11 Update + +1. New TVM Functionality. (Disabled by default) +2. A series of emulator improvements: libraries support, higher max stack size, etc +3. A series of tonlib and tonlib-cli improvements: wallet-v4 support, getconfig, showtransactions, etc +4. Changes to public libraries: now contract can not publish more than 256 libraries (config parameter) and contracts can not be deployed with public libraries in initstate (instead contracts need explicitly publish all libraries) +5. Changes to storage due payment: now due payment is collected in Storage Phase, however for bouncable messages fee amount can not exceed balance of account prior to message. + + +Besides the work of the core team, this update is based on the efforts of @aleksej-paschenko (emulator improvements), @akifoq (security improvements), Trail of Bits auditor as well as all participants of [TEP-88 discussion](https://github.com/ton-blockchain/TEPs/pull/88). + ## 2023.10 Update 1. A series of additional security checks in node: special cells in action list, init state in external messages, peers data prior to saving to disk. 2. Human-readable timestamps in explorer diff --git a/recent_changelog.md b/recent_changelog.md index fe2c34b2..5de7aed7 100644 --- a/recent_changelog.md +++ b/recent_changelog.md @@ -1,5 +1,10 @@ -## 2023.06 Update -1. (disabled by default) New deflation mechanisms: partial fee burning and blackhole address -2. Storage-contract improvement +##2023.11 Update -Besides the work of the core team, this update is based on the efforts of @DearJohnDoe from Tonbyte (Storage-contract improvement). +1. New TVM Functionality. (Disabled by default) +2. A series of emulator improvements: libraries support, higher max stack size, etc +3. A series of tonlib and tonlib-cli improvements: wallet-v4 support, getconfig, showtransactions, etc +4. 
Changes to public libraries: now contract can not publish more than 256 libraries (config parameter) and contracts can not be deployed with public libraries in initstate (instead contracts need explicitly publish all libraries) +5. Changes to storage due payment: now due payment is collected in Storage Phase, however for bouncable messages fee amount can not exceed balance of account prior to message. + + +Besides the work of the core team, this update is based on the efforts of @aleksej-paschenko (emulator improvements), @akifoq (security improvements), Trail of Bits auditor as well as all participants of [TEP-88 discussion](https://github.com/ton-blockchain/TEPs/pull/88). From 7fcf26771748338038aec4e9ec543dc69afeb1fa Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 6 Dec 2023 19:34:01 +0300 Subject: [PATCH 36/71] Improve large OutMsgQueue clearance (#822) * Improve Collator::opt_msg_queue_cleanup, increase collator timeout * Disable importing ext msgs if queue is too big * Extend timeout in collator if previous block is too old --------- Co-authored-by: SpyCheese --- crypto/vm/dict.cpp | 20 +++++++----- crypto/vm/dict.h | 4 +-- tdutils/td/utils/Time.h | 6 ++++ validator/impl/collator.cpp | 58 +++++++++++++++++++++++++++++------ validator/validator-group.cpp | 33 ++++++++++++++++---- validator/validator-group.hpp | 9 ++++++ 6 files changed, 105 insertions(+), 25 deletions(-) diff --git a/crypto/vm/dict.cpp b/crypto/vm/dict.cpp index ac32b38f..c79924d0 100644 --- a/crypto/vm/dict.cpp +++ b/crypto/vm/dict.cpp @@ -21,6 +21,7 @@ #include "vm/cellslice.h" #include "vm/stack.hpp" #include "common/bitstring.h" +#include "td/utils/Random.h" #include "td/utils/bits.h" @@ -2007,7 +2008,7 @@ bool DictionaryFixed::combine_with(DictionaryFixed& dict2) { bool DictionaryFixed::dict_check_for_each(Ref dict, td::BitPtr key_buffer, int n, int total_key_len, const DictionaryFixed::foreach_func_t& foreach_func, - bool invert_first) const { + bool invert_first, bool shuffle) const { if (dict.is_null()) { return true; } @@ -2026,26 +2027,29 @@ bool DictionaryFixed::dict_check_for_each(Ref dict, td::BitPtr key_buffer, key_buffer += l + 1; if (l) { invert_first = false; - } else if (invert_first) { + } + bool invert = shuffle ? 
td::Random::fast(0, 1) == 1: invert_first; + if (invert) { std::swap(c1, c2); } - key_buffer[-1] = invert_first; + key_buffer[-1] = invert; // recursive check_foreach applied to both children - if (!dict_check_for_each(std::move(c1), key_buffer, n - l - 1, total_key_len, foreach_func)) { + if (!dict_check_for_each(std::move(c1), key_buffer, n - l - 1, total_key_len, foreach_func, false, shuffle)) { return false; } - key_buffer[-1] = !invert_first; - return dict_check_for_each(std::move(c2), key_buffer, n - l - 1, total_key_len, foreach_func); + key_buffer[-1] = !invert; + return dict_check_for_each(std::move(c2), key_buffer, n - l - 1, total_key_len, foreach_func, false, shuffle); } -bool DictionaryFixed::check_for_each(const foreach_func_t& foreach_func, bool invert_first) { +bool DictionaryFixed::check_for_each(const foreach_func_t& foreach_func, bool invert_first, bool shuffle) { force_validate(); if (is_empty()) { return true; } int key_len = get_key_bits(); unsigned char key_buffer[max_key_bytes]; - return dict_check_for_each(get_root_cell(), td::BitPtr{key_buffer}, key_len, key_len, foreach_func, invert_first); + return dict_check_for_each(get_root_cell(), td::BitPtr{key_buffer}, key_len, key_len, foreach_func, invert_first, + shuffle); } static inline bool set_bit(td::BitPtr ptr, bool value = true) { diff --git a/crypto/vm/dict.h b/crypto/vm/dict.h index 978f4d53..c4044963 100644 --- a/crypto/vm/dict.h +++ b/crypto/vm/dict.h @@ -223,7 +223,7 @@ class DictionaryFixed : public DictionaryBase { int get_common_prefix(td::BitPtr buffer, unsigned buffer_len); bool cut_prefix_subdict(td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false); Ref extract_prefix_subdict_root(td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false); - bool check_for_each(const foreach_func_t& foreach_func, bool invert_first = false); + bool check_for_each(const foreach_func_t& foreach_func, bool invert_first = false, bool shuffle = false); int filter(filter_func_t check); bool combine_with(DictionaryFixed& dict2, const combine_func_t& combine_func, int mode = 0); bool combine_with(DictionaryFixed& dict2, const simple_combine_func_t& simple_combine_func, int mode = 0); @@ -292,7 +292,7 @@ class DictionaryFixed : public DictionaryBase { std::pair, bool> extract_prefix_subdict_internal(Ref dict, td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false) const; bool dict_check_for_each(Ref dict, td::BitPtr key_buffer, int n, int total_key_len, - const foreach_func_t& foreach_func, bool invert_first = false) const; + const foreach_func_t& foreach_func, bool invert_first = false, bool shuffle = false) const; std::pair, int> dict_filter(Ref dict, td::BitPtr key, int n, const filter_func_t& check_leaf, int& skip_rest) const; Ref dict_combine_with(Ref dict1, Ref dict2, td::BitPtr key_buffer, int n, int total_key_len, diff --git a/tdutils/td/utils/Time.h b/tdutils/td/utils/Time.h index 5151b818..ece822d4 100644 --- a/tdutils/td/utils/Time.h +++ b/tdutils/td/utils/Time.h @@ -110,6 +110,7 @@ class Timestamp { } friend bool operator==(Timestamp a, Timestamp b); + friend Timestamp &operator+=(Timestamp &a, double b); private: double at_{0}; @@ -122,6 +123,11 @@ inline bool operator<(const Timestamp &a, const Timestamp &b) { return a.at() < b.at(); } +inline Timestamp &operator+=(Timestamp &a, double b) { + a.at_ += b; + return a; +} + template void store(const Timestamp ×tamp, StorerT &storer) { storer.store_binary(timestamp.at() - Time::now() + Clocks::system()); diff --git 
a/validator/impl/collator.cpp b/validator/impl/collator.cpp index b72292bb..2030b345 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -703,6 +703,11 @@ bool Collator::unpack_last_mc_state() { return fatal_error(limits.move_as_error()); } block_limits_ = limits.move_as_ok(); + if (!is_masterchain()) { + // block_limits_->bytes = {131072 / 3, 524288 / 3, 1048576 / 3}; + // block_limits_->gas = {2000000 / 3, 10000000 / 3, 20000000 / 3}; + block_limits_->lt_delta = {20, 180, 200}; + } LOG(DEBUG) << "block limits: bytes [" << block_limits_->bytes.underload() << ", " << block_limits_->bytes.soft() << ", " << block_limits_->bytes.hard() << "]"; LOG(DEBUG) << "block limits: gas [" << block_limits_->gas.underload() << ", " << block_limits_->gas.soft() << ", " @@ -1818,6 +1823,17 @@ bool Collator::init_utime() { CHECK(config_); // consider unixtime and lt from previous block(s) of the same shardchain prev_now_ = prev_state_utime_; + // Extend collator timeout if previous block is too old + td::Timestamp new_timeout = td::Timestamp::in(std::min(30.0, (td::Clocks::system() - (double)prev_now_) / 2)); + if (timeout < new_timeout) { + double add = new_timeout.at() - timeout.at(); + timeout = new_timeout; + queue_cleanup_timeout_ += add; + soft_timeout_ += add; + medium_timeout_ += add; + alarm_timestamp() = timeout; + } + auto prev = std::max(config_->utime, prev_now_); now_ = std::max(prev + 1, (unsigned)std::time(nullptr)); if (now_ > now_upper_limit_) { @@ -2170,18 +2186,30 @@ bool Collator::out_msg_queue_cleanup() { } } - auto res = out_msg_queue_->filter([&](vm::CellSlice& cs, td::ConstBitPtr key, int n) -> int { + auto queue_root = out_msg_queue_->get_root_cell(); + if (queue_root.is_null()) { + LOG(DEBUG) << "out_msg_queue is empty"; + return true; + } + auto old_out_msg_queue = std::make_unique(queue_root, 352, block::tlb::aug_OutMsgQueue); + + int deleted = 0; + int total = 0; + bool fail = false; + old_out_msg_queue->check_for_each([&](Ref value, td::ConstBitPtr key, int n) -> bool { + ++total; assert(n == 352); + vm::CellSlice& cs = value.write(); // LOG(DEBUG) << "key is " << key.to_hex(n); if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) { LOG(WARNING) << "cleaning up outbound queue takes too long, ending"; outq_cleanup_partial_ = true; - return (1 << 30) + 1; // retain all remaining outbound queue entries including this one without processing + return false; // retain all remaining outbound queue entries including this one without processing } if (block_full_) { LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially"; outq_cleanup_partial_ = true; - return (1 << 30) + 1; // retain all remaining outbound queue entries including this one without processing + return false; // retain all remaining outbound queue entries including this one without processing } block::EnqueuedMsgDescr enq_msg_descr; unsigned long long created_lt; @@ -2190,7 +2218,8 @@ bool Collator::out_msg_queue_cleanup() { && enq_msg_descr.check_key(key) // check key && enq_msg_descr.lt_ == created_lt)) { LOG(ERROR) << "cannot unpack EnqueuedMsg with key " << key.to_hex(n); - return -1; + fail = true; + return false; } LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_; @@ -2208,22 +2237,29 @@ bool Collator::out_msg_queue_cleanup() { if (delivered) { LOG(DEBUG) << "outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << 
enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ << " has been already delivered, dequeueing"; + ++deleted; + out_msg_queue_->lookup_delete_with_extra(key, n); if (!dequeue_message(std::move(enq_msg_descr.msg_env_), deliver_lt)) { fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex() << ") by inserting a msg_export_deq record"); - return -1; + fail = true; + return false; } register_out_msg_queue_op(); if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { block_full_ = true; } } - return !delivered; - }); - LOG(DEBUG) << "deleted " << res << " messages from out_msg_queue"; - if (res < 0) { + return true; + }, false, true /* random order */); + LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue, processed " << total << " messages in total"; + if (fail) { return fatal_error("error scanning/updating OutMsgQueue"); } + if (outq_cleanup_partial_ || total > 8000) { + LOG(INFO) << "out_msg_queue too big, skipping importing external messages"; + skip_extmsg_ = true; + } auto rt = out_msg_queue_->get_root(); if (verbosity >= 2) { std::cerr << "new out_msg_queue is "; @@ -3277,6 +3313,10 @@ bool Collator::process_inbound_external_messages() { LOG(INFO) << "skipping processing of inbound external messages"; return true; } + if (out_msg_queue_->get_root_cell().not_null() && out_msg_queue_->get_root_cell()->get_depth() > 12) { + LOG(INFO) << "skipping processing of inbound external messages: out msg queue is too big"; + return true; + } bool full = !block_limit_status_->fits(block::ParamLimits::cl_soft); for (auto& ext_msg_pair : ext_msg_list_) { if (full) { diff --git a/validator/validator-group.cpp b/validator/validator-group.cpp index b4c38e51..c1f4f38a 100644 --- a/validator/validator-group.cpp +++ b/validator/validator-group.cpp @@ -78,6 +78,17 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidat promise.set_error(td::Status::Error(ErrorCode::notready, "too old")); return; } + + auto next_block_id = create_next_block_id(block.id.root_hash, block.id.file_hash); + block.id = next_block_id; + + CacheKey cache_key = block_to_cache_key(block); + auto it = approved_candidates_cache_.find(cache_key); + if (it != approved_candidates_cache_.end()) { + promise.set_result(it->second); + return; + } + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), round_id, block = block.clone(), promise = std::move(promise)](td::Result R) mutable { if (R.is_error()) { @@ -93,24 +104,32 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidat td::Timestamp::in(0.1)); } else { auto v = R.move_as_ok(); - v.visit(td::overloaded([&](UnixTime ts) { promise.set_result(ts); }, - [&](CandidateReject reject) { - promise.set_error(td::Status::Error(ErrorCode::protoviolation, - PSTRING() << "bad candidate: " << reject.reason)); - })); + v.visit(td::overloaded( + [&](UnixTime ts) { + td::actor::send_closure(SelfId, &ValidatorGroup::update_approve_cache, block_to_cache_key(block), + ts); + promise.set_result(ts); + }, + [&](CandidateReject reject) { + promise.set_error( + td::Status::Error(ErrorCode::protoviolation, PSTRING() << "bad candidate: " << reject.reason)); + })); } }); if (!started_) { P.set_error(td::Status::Error(ErrorCode::notready, "validator group not started")); return; } - auto next_block_id = create_next_block_id(block.id.root_hash, block.id.file_hash); VLOG(VALIDATOR_DEBUG) << "validating block candidate " << 
next_block_id; block.id = next_block_id; run_validate_query(shard_, min_ts_, min_masterchain_block_id_, prev_block_ids_, std::move(block), validator_set_, manager_, td::Timestamp::in(15.0), std::move(P)); } +void ValidatorGroup::update_approve_cache(CacheKey key, UnixTime value) { + approved_candidates_cache_[key] = value; +} + void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash src, td::BufferSlice block_data, RootHash root_hash, FileHash file_hash, std::vector signatures, @@ -155,6 +174,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s std::move(approve_sig_set), src == local_id_, manager_, std::move(P)); prev_block_ids_ = std::vector{next_block_id}; cached_collated_block_ = nullptr; + approved_candidates_cache_.clear(); } void ValidatorGroup::retry_accept_block_query(BlockIdExt block_id, td::Ref block, @@ -310,6 +330,7 @@ void ValidatorGroup::start(std::vector prev, BlockIdExt min_masterch min_masterchain_block_id_ = min_masterchain_block_id; min_ts_ = min_ts; cached_collated_block_ = nullptr; + approved_candidates_cache_.clear(); started_ = true; if (init_) { diff --git a/validator/validator-group.hpp b/validator/validator-group.hpp index da9193ba..a158bc43 100644 --- a/validator/validator-group.hpp +++ b/validator/validator-group.hpp @@ -126,6 +126,15 @@ class ValidatorGroup : public td::actor::Actor { std::shared_ptr cached_collated_block_; void generated_block_candidate(std::shared_ptr cache, td::Result R); + + typedef std::tuple CacheKey; + std::map approved_candidates_cache_; + + void update_approve_cache(CacheKey key, UnixTime value); + + static CacheKey block_to_cache_key(const BlockCandidate& block) { + return std::make_tuple(block.pubkey.as_bits256(), block.id, sha256_bits256(block.data), block.collated_file_hash); + } }; } // namespace validator From 9b6d699c21a190be0cea7a38effe97f831035723 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 8 Dec 2023 14:20:17 +0300 Subject: [PATCH 37/71] Cache recent block states and adjust timeouts (#823) * Add parameter --celldb-compress-depth to speed up celldb * Fix collator timeout * Add block_state_cache * Adjust state cache ttl * Don't merge shards when queue is too big * Decrease lt limit if previous block is too old --------- Co-authored-by: SpyCheese --- crypto/vm/db/CellStorage.cpp | 43 +++++++++++--- crypto/vm/db/CellStorage.h | 6 +- crypto/vm/db/DynamicBagOfCellsDb.cpp | 12 +++- crypto/vm/db/DynamicBagOfCellsDb.h | 3 + validator-engine/validator-engine.cpp | 6 ++ validator-engine/validator-engine.hpp | 4 ++ validator/db/celldb.cpp | 82 +++++++++++++++++++++++++-- validator/db/celldb.hpp | 21 ++++++- validator/db/rootdb.cpp | 2 +- validator/db/rootdb.hpp | 7 ++- validator/fabric.h | 4 +- validator/impl/collator-impl.h | 1 + validator/impl/collator.cpp | 16 +++--- validator/impl/fabric.cpp | 5 +- validator/manager-disk.cpp | 2 +- validator/manager-hardfork.cpp | 2 +- validator/manager.cpp | 36 +++++++++++- validator/manager.hpp | 6 ++ validator/validator-options.hpp | 7 +++ validator/validator.h | 2 + 20 files changed, 230 insertions(+), 37 deletions(-) diff --git a/crypto/vm/db/CellStorage.cpp b/crypto/vm/db/CellStorage.cpp index a1b7365b..acc55898 100644 --- a/crypto/vm/db/CellStorage.cpp +++ b/crypto/vm/db/CellStorage.cpp @@ -27,16 +27,25 @@ namespace vm { namespace { class RefcntCellStorer { public: - RefcntCellStorer(td::int32 refcnt, const DataCell &cell) : refcnt_(refcnt), cell_(cell) { + RefcntCellStorer(td::int32 refcnt, const td::Ref &cell, bool as_boc) + 
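      // On-disk layout produced by store() below: when as_boc_ is set (callers set it for cells
      // whose depth equals the configured celldb compress depth), a -1 tag is written first, then
      // the real refcnt, then the std_boc_serialize()d cell; otherwise the original refcnt + raw
      // cell encoding is kept. RefcntCellParser recognises the -1 tag and deserializes the BOC.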
: refcnt_(refcnt), cell_(cell), as_boc_(as_boc) { } template void store(StorerT &storer) const { using td::store; + if (as_boc_) { + td::int32 tag = -1; + store(tag, storer); + store(refcnt_, storer); + td::BufferSlice data = vm::std_boc_serialize(cell_).move_as_ok(); + storer.store_slice(data); + return; + } store(refcnt_, storer); - store(cell_, storer); - for (unsigned i = 0; i < cell_.size_refs(); i++) { - auto cell = cell_.get_ref(i); + store(*cell_, storer); + for (unsigned i = 0; i < cell_->size_refs(); i++) { + auto cell = cell_->get_ref(i); auto level_mask = cell->get_level_mask(); auto level = level_mask.get_level(); td::uint8 x = static_cast(level_mask.get_mask()); @@ -60,7 +69,8 @@ class RefcntCellStorer { private: td::int32 refcnt_; - const DataCell &cell_; + td::Ref cell_; + bool as_boc_; }; class RefcntCellParser { @@ -69,11 +79,17 @@ class RefcntCellParser { } td::int32 refcnt; Ref cell; + bool stored_boc_; template void parse(ParserT &parser, ExtCellCreator &ext_cell_creator) { using ::td::parse; parse(refcnt, parser); + stored_boc_ = false; + if (refcnt == -1) { + stored_boc_ = true; + parse(refcnt, parser); + } if (!need_data_) { return; } @@ -81,6 +97,12 @@ class RefcntCellParser { TRY_STATUS(parser.get_status()); auto size = parser.get_left_len(); td::Slice data = parser.template fetch_string_raw(size); + if (stored_boc_) { + TRY_RESULT(boc, vm::std_boc_deserialize(data)); + TRY_RESULT(loaded_cell, boc->load_cell()); + cell = std::move(loaded_cell.data_cell); + return td::Status::OK(); + } CellSerializationInfo info; auto cell_data = data; TRY_STATUS(info.init(cell_data, 0 /*ref_byte_size*/)); @@ -122,7 +144,8 @@ class RefcntCellParser { }; } // namespace -CellLoader::CellLoader(std::shared_ptr reader) : reader_(std::move(reader)) { +CellLoader::CellLoader(std::shared_ptr reader, std::function on_load_callback) + : reader_(std::move(reader)), on_load_callback_(std::move(on_load_callback)) { CHECK(reader_); } @@ -145,7 +168,11 @@ td::Result CellLoader::load(td::Slice hash, bool need_da res.refcnt_ = refcnt_cell.refcnt; res.cell_ = std::move(refcnt_cell.cell); + res.stored_boc_ = refcnt_cell.stored_boc_; //CHECK(res.cell_->get_hash() == hash); + if (on_load_callback_) { + on_load_callback_(res); + } return res; } @@ -157,7 +184,7 @@ td::Status CellStorer::erase(td::Slice hash) { return kv_.erase(hash); } -td::Status CellStorer::set(td::int32 refcnt, const DataCell &cell) { - return kv_.set(cell.get_hash().as_slice(), td::serialize(RefcntCellStorer(refcnt, cell))); +td::Status CellStorer::set(td::int32 refcnt, const td::Ref &cell, bool as_boc) { + return kv_.set(cell->get_hash().as_slice(), td::serialize(RefcntCellStorer(refcnt, cell, as_boc))); } } // namespace vm diff --git a/crypto/vm/db/CellStorage.h b/crypto/vm/db/CellStorage.h index b705b531..3106ee16 100644 --- a/crypto/vm/db/CellStorage.h +++ b/crypto/vm/db/CellStorage.h @@ -45,19 +45,21 @@ class CellLoader { Ref cell_; td::int32 refcnt_{0}; + bool stored_boc_{false}; }; - CellLoader(std::shared_ptr reader); + CellLoader(std::shared_ptr reader, std::function on_load_callback = {}); td::Result load(td::Slice hash, bool need_data, ExtCellCreator &ext_cell_creator); private: std::shared_ptr reader_; + std::function on_load_callback_; }; class CellStorer { public: CellStorer(KeyValue &kv); td::Status erase(td::Slice hash); - td::Status set(td::int32 refcnt, const DataCell &cell); + td::Status set(td::int32 refcnt, const td::Ref &cell, bool as_boc); private: KeyValue &kv_; diff --git 
a/crypto/vm/db/DynamicBagOfCellsDb.cpp b/crypto/vm/db/DynamicBagOfCellsDb.cpp index 0d1d099f..1aa4e0f5 100644 --- a/crypto/vm/db/DynamicBagOfCellsDb.cpp +++ b/crypto/vm/db/DynamicBagOfCellsDb.cpp @@ -210,6 +210,14 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat return td::Status::OK(); } + void set_celldb_compress_depth(td::uint32 value) override { + celldb_compress_depth_ = value; + } + + vm::ExtCellCreator& as_ext_cell_creator() override { + return *this; + } + private: std::unique_ptr loader_; std::vector> to_inc_; @@ -217,6 +225,7 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat CellHashTable hash_table_; std::vector visited_; Stats stats_diff_; + td::uint32 celldb_compress_depth_{0}; static td::NamedThreadSafeCounter::CounterRef get_thread_safe_counter() { static auto res = td::NamedThreadSafeCounter::get_default().get_counter("DynamicBagOfCellsDb"); @@ -443,7 +452,8 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat guard.dismiss(); } else { auto loaded_cell = info.cell->load_cell().move_as_ok(); - storer.set(info.db_refcnt, *loaded_cell.data_cell); + storer.set(info.db_refcnt, loaded_cell.data_cell, + loaded_cell.data_cell->get_depth() == celldb_compress_depth_ && celldb_compress_depth_ != 0); info.in_db = true; } } diff --git a/crypto/vm/db/DynamicBagOfCellsDb.h b/crypto/vm/db/DynamicBagOfCellsDb.h index 3569208c..fa2b44d2 100644 --- a/crypto/vm/db/DynamicBagOfCellsDb.h +++ b/crypto/vm/db/DynamicBagOfCellsDb.h @@ -64,6 +64,9 @@ class DynamicBagOfCellsDb { // restart with new loader will also reset stats_diff virtual td::Status set_loader(std::unique_ptr loader) = 0; + virtual void set_celldb_compress_depth(td::uint32 value) = 0; + virtual vm::ExtCellCreator& as_ext_cell_creator() = 0; + static std::unique_ptr create(); class AsyncExecutor { diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp index 23cc9347..27327493 100644 --- a/validator-engine/validator-engine.cpp +++ b/validator-engine/validator-engine.cpp @@ -1363,6 +1363,7 @@ td::Status ValidatorEngine::load_global_config() { if (!session_logs_file_.empty()) { validator_options_.write().set_session_logs_file(session_logs_file_); } + validator_options_.write().set_celldb_compress_depth(celldb_compress_depth_); std::vector h; for (auto &x : conf.validator_->hardforks_) { @@ -3761,6 +3762,11 @@ int main(int argc, char *argv[]) { acts.push_back([&x, at]() { td::actor::send_closure(x, &ValidatorEngine::schedule_shutdown, (double)at); }); return td::Status::OK(); }); + p.add_checked_option('\0', "celldb-compress-depth", "(default: 0)", [&](td::Slice arg) { + TRY_RESULT(value, td::to_integer_safe(arg)); + acts.push_back([&x, value]() { td::actor::send_closure(x, &ValidatorEngine::set_celldb_compress_depth, value); }); + return td::Status::OK(); + }); auto S = p.run(argc, argv); if (S.is_error()) { LOG(ERROR) << "failed to parse options: " << S.move_as_error(); diff --git a/validator-engine/validator-engine.hpp b/validator-engine/validator-engine.hpp index 2d966351..ebcd60c6 100644 --- a/validator-engine/validator-engine.hpp +++ b/validator-engine/validator-engine.hpp @@ -200,6 +200,7 @@ class ValidatorEngine : public td::actor::Actor { double sync_ttl_ = 0; double archive_ttl_ = 0; double key_proof_ttl_ = 0; + td::uint32 celldb_compress_depth_ = 0; bool read_config_ = false; bool started_keyring_ = false; bool started_ = false; @@ -257,6 +258,9 @@ class ValidatorEngine : public 
td::actor::Actor { keys_[key.compute_short_id()] = key; } void schedule_shutdown(double at); + void set_celldb_compress_depth(td::uint32 value) { + celldb_compress_depth_ = value; + } void start_up() override; ValidatorEngine() { } diff --git a/validator/db/celldb.cpp b/validator/db/celldb.cpp index 3b2a34f3..6a2b4699 100644 --- a/validator/db/celldb.cpp +++ b/validator/db/celldb.cpp @@ -62,16 +62,29 @@ void CellDbBase::execute_sync(std::function f) { f(); } -CellDbIn::CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path) - : root_db_(root_db), parent_(parent), path_(std::move(path)) { +CellDbIn::CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path, + td::Ref opts) + : root_db_(root_db), parent_(parent), path_(std::move(path)), opts_(opts) { } void CellDbIn::start_up() { + on_load_callback_ = [db = actor_id(this), + compress_depth = opts_->get_celldb_compress_depth()](const vm::CellLoader::LoadResult& res) { + if (res.cell_.is_null()) { + return; + } + bool expected_stored_boc = res.cell_->get_depth() == compress_depth && compress_depth != 0; + if (expected_stored_boc != res.stored_boc_) { + td::actor::send_closure(db, &CellDbIn::migrate_cell, td::Bits256{res.cell_->get_hash().bits()}); + } + }; + CellDbBase::start_up(); cell_db_ = std::make_shared(td::RocksDb::open(path_).move_as_ok()); boc_ = vm::DynamicBagOfCellsDb::create(); - boc_->set_loader(std::make_unique(cell_db_->snapshot())).ensure(); + boc_->set_celldb_compress_depth(opts_->get_celldb_compress_depth()); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); alarm_timestamp() = td::Timestamp::in(10.0); @@ -129,7 +142,7 @@ void CellDbIn::store_cell(BlockIdExt block_id, td::Ref cell, td::Promi set_block(key_hash, std::move(D)); cell_db_->commit_write_batch().ensure(); - boc_->set_loader(std::make_unique(cell_db_->snapshot())).ensure(); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); promise.set_result(boc_->load_cell(cell->get_hash().as_slice())); @@ -140,6 +153,9 @@ void CellDbIn::get_cell_db_reader(td::Promise> } void CellDbIn::alarm() { + if (migrate_after_ && migrate_after_.is_in_past()) { + migrate_cells(); + } auto E = get_block(get_empty_key_hash()).move_as_ok(); auto N = get_block(E.next).move_as_ok(); if (N.is_empty()) { @@ -220,7 +236,7 @@ void CellDbIn::gc_cont2(BlockHandle handle) { cell_db_->commit_write_batch().ensure(); alarm_timestamp() = td::Timestamp::now(); - boc_->set_loader(std::make_unique(cell_db_->snapshot())).ensure(); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); DCHECK(get_block(key_hash).is_error()); @@ -273,6 +289,49 @@ void CellDbIn::set_block(KeyHash key_hash, DbEntry e) { cell_db_->set(td::as_slice(key), e.release()).ensure(); } +void CellDbIn::migrate_cell(td::Bits256 hash) { + cells_to_migrate_.insert(hash); + if (cells_to_migrate_.size() >= 32) { + migrate_cells(); + } else if (!migrate_after_) { + migrate_after_ = td::Timestamp::in(1.0); + } +} + +void CellDbIn::migrate_cells() { + if (cells_to_migrate_.empty()) { + return; + } + vm::CellStorer stor{*cell_db_}; + auto loader = std::make_unique(cell_db_->snapshot()); + boc_->set_loader(std::make_unique(*loader)).ensure(); + 
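  // What follows re-stores every queued hash in the representation expected for the current
  // celldb compress depth: each cell is re-loaded through the plain loader, and if its stored
  // form (raw vs. BOC-serialized) does not match what its depth implies, it is written back via
  // CellStorer. The queue is filled by on_load_callback_ and flushed either once 32 hashes are
  // pending or when the one-second migrate_after_ timer fires (see migrate_cell() above).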
cell_db_->begin_write_batch().ensure(); + td::uint32 cnt = 0; + for (const auto& hash : cells_to_migrate_) { + auto R = loader->load(hash.as_slice(), true, boc_->as_ext_cell_creator()); + if (R.is_error()) { + continue; + } + if (R.ok().status == vm::CellLoader::LoadResult::NotFound) { + continue; + } + bool expected_stored_boc = + R.ok().cell_->get_depth() == opts_->get_celldb_compress_depth() && opts_->get_celldb_compress_depth() != 0; + if (expected_stored_boc != R.ok().stored_boc_) { + ++cnt; + stor.set(R.ok().refcnt(), R.ok().cell_, expected_stored_boc).ensure(); + } + } + cells_to_migrate_.clear(); + if (cnt > 0) { + LOG(DEBUG) << "Migrated " << cnt << " cells"; + } + cell_db_->commit_write_batch().ensure(); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); + td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); + migrate_after_ = td::Timestamp::never(); +} + void CellDb::load_cell(RootHash hash, td::Promise> promise) { if (!started_) { td::actor::send_closure(cell_db_, &CellDbIn::load_cell, hash, std::move(promise)); @@ -300,7 +359,18 @@ void CellDb::get_cell_db_reader(td::Promise> p void CellDb::start_up() { CellDbBase::start_up(); boc_ = vm::DynamicBagOfCellsDb::create(); - cell_db_ = td::actor::create_actor("celldbin", root_db_, actor_id(this), path_); + boc_->set_celldb_compress_depth(opts_->get_celldb_compress_depth()); + cell_db_ = td::actor::create_actor("celldbin", root_db_, actor_id(this), path_, opts_); + on_load_callback_ = [db = cell_db_.get(), + compress_depth = opts_->get_celldb_compress_depth()](const vm::CellLoader::LoadResult& res) { + if (res.cell_.is_null()) { + return; + } + bool expected_stored_boc = res.cell_->get_depth() == compress_depth && compress_depth != 0; + if (expected_stored_boc != res.stored_boc_) { + td::actor::send_closure(db, &CellDbIn::migrate_cell, td::Bits256{res.cell_->get_hash().bits()}); + } + }; } CellDbIn::DbEntry::DbEntry(tl_object_ptr entry) diff --git a/validator/db/celldb.hpp b/validator/db/celldb.hpp index a05e9ddb..6545d597 100644 --- a/validator/db/celldb.hpp +++ b/validator/db/celldb.hpp @@ -25,6 +25,7 @@ #include "ton/ton-types.h" #include "interfaces/block-handle.h" #include "auto/tl/ton_api.h" +#include "validator.h" namespace ton { @@ -53,7 +54,10 @@ class CellDbIn : public CellDbBase { void store_cell(BlockIdExt block_id, td::Ref cell, td::Promise> promise); void get_cell_db_reader(td::Promise> promise); - CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path); + void migrate_cell(td::Bits256 hash); + + CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path, + td::Ref opts); void start_up() override; void alarm() override; @@ -89,13 +93,20 @@ class CellDbIn : public CellDbBase { void gc_cont2(BlockHandle handle); void skip_gc(); + void migrate_cells(); + td::actor::ActorId root_db_; td::actor::ActorId parent_; std::string path_; + td::Ref opts_; std::unique_ptr boc_; std::shared_ptr cell_db_; + + std::function on_load_callback_; + std::set cells_to_migrate_; + td::Timestamp migrate_after_ = td::Timestamp::never(); }; class CellDb : public CellDbBase { @@ -104,11 +115,12 @@ class CellDb : public CellDbBase { void store_cell(BlockIdExt block_id, td::Ref cell, td::Promise> promise); void update_snapshot(std::unique_ptr snapshot) { started_ = true; - boc_->set_loader(std::make_unique(std::move(snapshot))).ensure(); + boc_->set_loader(std::make_unique(std::move(snapshot), on_load_callback_)).ensure(); } void 
get_cell_db_reader(td::Promise> promise); - CellDb(td::actor::ActorId root_db, std::string path) : root_db_(root_db), path_(path) { + CellDb(td::actor::ActorId root_db, std::string path, td::Ref opts) + : root_db_(root_db), path_(path), opts_(opts) { } void start_up() override; @@ -116,11 +128,14 @@ class CellDb : public CellDbBase { private: td::actor::ActorId root_db_; std::string path_; + td::Ref opts_; td::actor::ActorOwn cell_db_; std::unique_ptr boc_; bool started_ = false; + + std::function on_load_callback_; }; } // namespace validator diff --git a/validator/db/rootdb.cpp b/validator/db/rootdb.cpp index a7a1becf..601b07c1 100644 --- a/validator/db/rootdb.cpp +++ b/validator/db/rootdb.cpp @@ -397,7 +397,7 @@ void RootDb::get_hardforks(td::Promise> promise) { } void RootDb::start_up() { - cell_db_ = td::actor::create_actor("celldb", actor_id(this), root_path_ + "/celldb/"); + cell_db_ = td::actor::create_actor("celldb", actor_id(this), root_path_ + "/celldb/", opts_); state_db_ = td::actor::create_actor("statedb", actor_id(this), root_path_ + "/state/"); static_files_db_ = td::actor::create_actor("staticfilesdb", actor_id(this), root_path_ + "/static/"); archive_db_ = td::actor::create_actor("archive", actor_id(this), root_path_); diff --git a/validator/db/rootdb.hpp b/validator/db/rootdb.hpp index 9b0d52a6..598defcb 100644 --- a/validator/db/rootdb.hpp +++ b/validator/db/rootdb.hpp @@ -26,6 +26,7 @@ #include "statedb.hpp" #include "staticfilesdb.hpp" #include "archive-manager.hpp" +#include "validator.h" namespace ton { @@ -34,8 +35,9 @@ namespace validator { class RootDb : public Db { public: enum class Flags : td::uint32 { f_started = 1, f_ready = 2, f_switched = 4, f_archived = 8 }; - RootDb(td::actor::ActorId validator_manager, std::string root_path) - : validator_manager_(validator_manager), root_path_(std::move(root_path)) { + RootDb(td::actor::ActorId validator_manager, std::string root_path, + td::Ref opts) + : validator_manager_(validator_manager), root_path_(std::move(root_path)), opts_(opts) { } void start_up() override; @@ -138,6 +140,7 @@ class RootDb : public Db { td::actor::ActorId validator_manager_; std::string root_path_; + td::Ref opts_; td::actor::ActorOwn cell_db_; td::actor::ActorOwn state_db_; diff --git a/validator/fabric.h b/validator/fabric.h index 58f0647b..326b17ae 100644 --- a/validator/fabric.h +++ b/validator/fabric.h @@ -20,12 +20,14 @@ #include "interfaces/validator-manager.h" #include "interfaces/db.h" +#include "validator.h" namespace ton { namespace validator { -td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_); +td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_, + td::Ref opts); td::actor::ActorOwn create_liteserver_cache_actor(td::actor::ActorId manager, std::string db_root); diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index c2088f47..b3063993 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -159,6 +159,7 @@ class Collator final : public td::actor::Actor { bool report_version_{false}; bool skip_topmsgdescr_{false}; bool skip_extmsg_{false}; + bool queue_too_big_{false}; bool short_dequeue_records_{false}; td::uint64 overload_history_{0}, underload_history_{0}; td::uint64 block_size_estimate_{}; diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 2030b345..2e4dfa1d 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -703,9 +703,7 @@ bool 
Collator::unpack_last_mc_state() { return fatal_error(limits.move_as_error()); } block_limits_ = limits.move_as_ok(); - if (!is_masterchain()) { - // block_limits_->bytes = {131072 / 3, 524288 / 3, 1048576 / 3}; - // block_limits_->gas = {2000000 / 3, 10000000 / 3, 20000000 / 3}; + if (now_ > prev_now_ + 15 && block_limits_->lt_delta.hard() > 200) { block_limits_->lt_delta = {20, 180, 200}; } LOG(DEBUG) << "block limits: bytes [" << block_limits_->bytes.underload() << ", " << block_limits_->bytes.soft() @@ -1828,9 +1826,6 @@ bool Collator::init_utime() { if (timeout < new_timeout) { double add = new_timeout.at() - timeout.at(); timeout = new_timeout; - queue_cleanup_timeout_ += add; - soft_timeout_ += add; - medium_timeout_ += add; alarm_timestamp() = timeout; } @@ -2259,6 +2254,7 @@ bool Collator::out_msg_queue_cleanup() { if (outq_cleanup_partial_ || total > 8000) { LOG(INFO) << "out_msg_queue too big, skipping importing external messages"; skip_extmsg_ = true; + queue_too_big_ = true; } auto rt = out_msg_queue_->get_root(); if (verbosity >= 2) { @@ -4170,8 +4166,12 @@ bool Collator::check_block_overload() { << " size_estimate=" << block_size_estimate_; auto cl = block_limit_status_->classify(); if (cl <= block::ParamLimits::cl_underload) { - underload_history_ |= 1; - LOG(INFO) << "block is underloaded"; + if (queue_too_big_) { + LOG(INFO) << "block is underloaded, but don't set underload history because out msg queue is big"; + } else { + underload_history_ |= 1; + LOG(INFO) << "block is underloaded"; + } } else if (cl >= block::ParamLimits::cl_soft) { overload_history_ |= 1; LOG(INFO) << "block is overloaded (category " << cl << ")"; diff --git a/validator/impl/fabric.cpp b/validator/impl/fabric.cpp index 0131fff7..23a03482 100644 --- a/validator/impl/fabric.cpp +++ b/validator/impl/fabric.cpp @@ -39,8 +39,9 @@ namespace ton { namespace validator { -td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_) { - return td::actor::create_actor("db", manager, db_root_); +td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_, + td::Ref opts) { + return td::actor::create_actor("db", manager, db_root_, opts); } td::actor::ActorOwn create_liteserver_cache_actor(td::actor::ActorId manager, diff --git a/validator/manager-disk.cpp b/validator/manager-disk.cpp index 8818c86a..3717d7b0 100644 --- a/validator/manager-disk.cpp +++ b/validator/manager-disk.cpp @@ -901,7 +901,7 @@ void ValidatorManagerImpl::send_top_shard_block_description(td::Ref R) { R.ensure(); diff --git a/validator/manager-hardfork.cpp b/validator/manager-hardfork.cpp index 80a64d25..e290f635 100644 --- a/validator/manager-hardfork.cpp +++ b/validator/manager-hardfork.cpp @@ -549,7 +549,7 @@ void ValidatorManagerImpl::register_block_handle(BlockHandle handle, td::Promise } void ValidatorManagerImpl::start_up() { - db_ = create_db_actor(actor_id(this), db_root_); + db_ = create_db_actor(actor_id(this), db_root_, opts_); } void ValidatorManagerImpl::try_get_static_file(FileHash file_hash, td::Promise promise) { diff --git a/validator/manager.cpp b/validator/manager.cpp index e22616ea..8caed0f6 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -585,6 +585,12 @@ void ValidatorManagerImpl::run_ext_query(td::BufferSlice data, td::Promise> promise) { + auto it0 = block_state_cache_.find(handle->id()); + if (it0 != block_state_cache_.end()) { + it0->second.ttl_ = td::Timestamp::in(30.0); + promise.set_result(it0->second.state_); + return; + } auto it = 
wait_state_.find(handle->id()); if (it == wait_state_.end()) { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result> R) { @@ -988,6 +994,9 @@ void ValidatorManagerImpl::get_block_by_seqno_from_db(AccountIdPrefixFull accoun } void ValidatorManagerImpl::finished_wait_state(BlockHandle handle, td::Result> R) { + if (R.is_ok()) { + block_state_cache_[handle->id()] = {R.ok(), td::Timestamp::in(30.0)}; + } auto it = wait_state_.find(handle->id()); if (it != wait_state_.end()) { if (R.is_error()) { @@ -1441,7 +1450,7 @@ void ValidatorManagerImpl::send_block_broadcast(BlockBroadcast broadcast) { } void ValidatorManagerImpl::start_up() { - db_ = create_db_actor(actor_id(this), db_root_); + db_ = create_db_actor(actor_id(this), db_root_, opts_); lite_server_cache_ = create_liteserver_cache_actor(actor_id(this), db_root_); token_manager_ = td::actor::create_actor("tokenmanager"); td::mkdir(db_root_ + "/tmp/").ensure(); @@ -2373,6 +2382,31 @@ void ValidatorManagerImpl::alarm() { for (auto &w : shard_client_waiters_) { w.second.check_timers(); } + for (auto it = block_state_cache_.begin(); it != block_state_cache_.end();) { + bool del = it->second.ttl_.is_in_past(); + if (del) { + auto block_id = it->first; + if (block_id.is_masterchain()) { + if (block_id.seqno() == last_masterchain_seqno_) { + it->second.ttl_ = td::Timestamp::in(30.0); + del = false; + } + } else if (last_masterchain_state_.not_null()) { + auto shard = last_masterchain_state_->get_shard_from_config(block_id.shard_full()); + if (shard.not_null()) { + if (block_id.seqno() == shard->top_block_id().seqno()) { + it->second.ttl_ = td::Timestamp::in(30.0); + del = false; + } + } + } + } + if (del) { + it = block_state_cache_.erase(it); + } else { + ++it; + } + } } alarm_timestamp().relax(check_waiters_at_); if (check_shard_clients_.is_in_past()) { diff --git a/validator/manager.hpp b/validator/manager.hpp index d133f83b..ccd85423 100644 --- a/validator/manager.hpp +++ b/validator/manager.hpp @@ -183,6 +183,12 @@ class ValidatorManagerImpl : public ValidatorManager { std::map>> wait_state_; std::map>> wait_block_data_; + struct CachedBlockState { + td::Ref state_; + td::Timestamp ttl_; + }; + std::map block_state_cache_; + struct WaitBlockHandle { std::vector> waiting_; }; diff --git a/validator/validator-options.hpp b/validator/validator-options.hpp index d23d8cc9..3a7e5ba7 100644 --- a/validator/validator-options.hpp +++ b/validator/validator-options.hpp @@ -114,6 +114,9 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions { std::string get_session_logs_file() const override { return session_logs_file_; } + td::uint32 get_celldb_compress_depth() const override { + return celldb_compress_depth_; + } void set_zero_block_id(BlockIdExt block_id) override { zero_block_id_ = block_id; @@ -167,6 +170,9 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions { void set_session_logs_file(std::string f) override { session_logs_file_ = std::move(f); } + void set_celldb_compress_depth(td::uint32 value) override { + celldb_compress_depth_ = value; + } ValidatorManagerOptionsImpl *make_copy() const override { return new ValidatorManagerOptionsImpl(*this); @@ -209,6 +215,7 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions { BlockSeqno truncate_{0}; BlockSeqno sync_upto_{0}; std::string session_logs_file_; + td::uint32 celldb_compress_depth_{0}; }; } // namespace validator diff --git a/validator/validator.h b/validator/validator.h index 0687b160..7cdea805 100644 --- 
a/validator/validator.h +++ b/validator/validator.h @@ -81,6 +81,7 @@ struct ValidatorManagerOptions : public td::CntObject { virtual BlockSeqno get_truncate_seqno() const = 0; virtual BlockSeqno sync_upto() const = 0; virtual std::string get_session_logs_file() const = 0; + virtual td::uint32 get_celldb_compress_depth() const = 0; virtual void set_zero_block_id(BlockIdExt block_id) = 0; virtual void set_init_block_id(BlockIdExt block_id) = 0; @@ -100,6 +101,7 @@ struct ValidatorManagerOptions : public td::CntObject { virtual void truncate_db(BlockSeqno seqno) = 0; virtual void set_sync_upto(BlockSeqno seqno) = 0; virtual void set_session_logs_file(std::string f) = 0; + virtual void set_celldb_compress_depth(td::uint32 value) = 0; static td::Ref create( BlockIdExt zero_block_id, BlockIdExt init_block_id, From 5e6b67ae96d31a8608375ee12835b11c7721a6d3 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 13 Dec 2023 12:57:34 +0300 Subject: [PATCH 38/71] Improve handling outbound message queues (#825) * Improve handling outbound message queues * Cleanup queue faster * Calculate queue sizes in background * Force or limit split/merge depending on queue size * Increase validate_ref limit for transaction * Add all changes of public libraries to block size estimation * Don't crash on timeout in GC * Don't import external messages when queue is too big --------- Co-authored-by: SpyCheese --- crypto/block/block.cpp | 26 ++- crypto/block/block.h | 12 +- crypto/block/output-queue-merger.cpp | 20 +- crypto/block/output-queue-merger.h | 12 +- crypto/block/transaction.cpp | 37 +++- validator-engine/validator-engine.cpp | 13 ++ validator/CMakeLists.txt | 2 + validator/impl/collator-impl.h | 5 +- validator/impl/collator.cpp | 284 +++++++++++++++++------- validator/manager-disk.hpp | 9 + validator/manager-hardfork.hpp | 9 + validator/manager.cpp | 10 +- validator/manager.hpp | 14 ++ validator/queue-size-counter.cpp | 301 ++++++++++++++++++++++++++ validator/queue-size-counter.hpp | 82 +++++++ validator/validator.h | 2 + 16 files changed, 727 insertions(+), 111 deletions(-) create mode 100644 validator/queue-size-counter.cpp create mode 100644 validator/queue-size-counter.hpp diff --git a/crypto/block/block.cpp b/crypto/block/block.cpp index 9a3dba60..a22fd1e5 100644 --- a/crypto/block/block.cpp +++ b/crypto/block/block.cpp @@ -715,7 +715,7 @@ td::uint64 BlockLimitStatus::estimate_block_size(const vm::NewCellStorageStat::S sum += *extra; } return 2000 + (sum.bits >> 3) + sum.cells * 12 + sum.internal_refs * 3 + sum.external_refs * 40 + accounts * 200 + - transactions * 200 + (extra ? 200 : 0) + extra_out_msgs * 300 + extra_library_diff * 700; + transactions * 200 + (extra ? 
200 : 0) + extra_out_msgs * 300 + public_library_diff * 700; } int BlockLimitStatus::classify() const { @@ -1009,8 +1009,8 @@ td::Status ShardState::merge_with(ShardState& sib) { return td::Status::OK(); } -td::Result> ShardState::compute_split_out_msg_queue( - ton::ShardIdFull subshard) { +td::Result> ShardState::compute_split_out_msg_queue(ton::ShardIdFull subshard, + td::uint32* queue_size) { auto shard = id_.shard_full(); if (!ton::shard_is_parent(shard, subshard)) { return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() + @@ -1018,7 +1018,7 @@ td::Result> ShardState::compute_split_o } CHECK(out_msg_queue_); auto subqueue = std::make_unique(*out_msg_queue_); - int res = block::filter_out_msg_queue(*subqueue, shard, subshard); + int res = block::filter_out_msg_queue(*subqueue, shard, subshard, queue_size); if (res < 0) { return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str()); } @@ -1040,7 +1040,7 @@ td::Result> ShardState::compu return std::move(sub_processed_upto); } -td::Status ShardState::split(ton::ShardIdFull subshard) { +td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) { if (!ton::shard_is_parent(id_.shard_full(), subshard)) { return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() + " because it is not a parent"); @@ -1058,7 +1058,7 @@ td::Status ShardState::split(ton::ShardIdFull subshard) { auto shard1 = id_.shard_full(); CHECK(ton::shard_is_parent(shard1, subshard)); CHECK(out_msg_queue_); - int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard); + int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, queue_size); if (res1 < 0) { return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str()); } @@ -1098,8 +1098,12 @@ td::Status ShardState::split(ton::ShardIdFull subshard) { return td::Status::OK(); } -int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard) { - return out_queue.filter([subshard, old_shard](vm::CellSlice& cs, td::ConstBitPtr key, int key_len) -> int { +int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard, + td::uint32* queue_size) { + if (queue_size) { + *queue_size = 0; + } + return out_queue.filter([=](vm::CellSlice& cs, td::ConstBitPtr key, int key_len) -> int { CHECK(key_len == 352); LOG(DEBUG) << "scanning OutMsgQueue entry with key " << key.to_hex(key_len); block::tlb::MsgEnvelope::Record_std env; @@ -1122,7 +1126,11 @@ int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull ol << " does not contain current address belonging to shard " << old_shard.to_str(); return -1; } - return ton::shard_contains(subshard, cur_prefix); + bool res = ton::shard_contains(subshard, cur_prefix); + if (res && queue_size) { + ++*queue_size; + } + return res; }); } diff --git a/crypto/block/block.h b/crypto/block/block.h index 09169429..c54949f4 100644 --- a/crypto/block/block.h +++ b/crypto/block/block.h @@ -262,7 +262,7 @@ struct BlockLimitStatus { td::uint64 gas_used{}; vm::NewCellStorageStat st_stat; unsigned accounts{}, transactions{}, extra_out_msgs{}; - unsigned extra_library_diff{}; // Number of public libraries in deleted/frozen accounts + unsigned public_library_diff{}; BlockLimitStatus(const BlockLimits& limits_, ton::LogicalTime lt = 0) : limits(limits_), cur_lt(std::max(limits_.start_lt, lt)) { } @@ 
-272,7 +272,7 @@ struct BlockLimitStatus { transactions = accounts = 0; gas_used = 0; extra_out_msgs = 0; - extra_library_diff = 0; + public_library_diff = 0; } td::uint64 estimate_block_size(const vm::NewCellStorageStat::Stat* extra = nullptr) const; int classify() const; @@ -433,10 +433,11 @@ struct ShardState { ton::BlockSeqno prev_mc_block_seqno, bool after_split, bool clear_history, std::function for_each_mcseqno); td::Status merge_with(ShardState& sib); - td::Result> compute_split_out_msg_queue(ton::ShardIdFull subshard); + td::Result> compute_split_out_msg_queue(ton::ShardIdFull subshard, + td::uint32* queue_size = nullptr); td::Result> compute_split_processed_upto( ton::ShardIdFull subshard); - td::Status split(ton::ShardIdFull subshard); + td::Status split(ton::ShardIdFull subshard, td::uint32* queue_size = nullptr); td::Status unpack_out_msg_queue_info(Ref out_msg_queue_info); bool clear_load_history() { overload_history_ = underload_history_ = 0; @@ -656,7 +657,8 @@ class MtCarloComputeShare { void gen_vset(); }; -int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard); +int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard, + td::uint32* queue_size = nullptr); std::ostream& operator<<(std::ostream& os, const ShardId& shard_id); diff --git a/crypto/block/output-queue-merger.cpp b/crypto/block/output-queue-merger.cpp index 1084bb1a..aa425f6b 100644 --- a/crypto/block/output-queue-merger.cpp +++ b/crypto/block/output-queue-merger.cpp @@ -146,22 +146,30 @@ bool OutputQueueMerger::add_root(int src, Ref outmsg_root) { return true; } -OutputQueueMerger::OutputQueueMerger(ton::ShardIdFull _queue_for, std::vector _neighbors) +OutputQueueMerger::OutputQueueMerger(ton::ShardIdFull _queue_for, std::vector _neighbors) : queue_for(_queue_for), neighbors(std::move(_neighbors)), eof(false), failed(false) { init(); } +OutputQueueMerger::OutputQueueMerger(ton::ShardIdFull _queue_for, std::vector _neighbors) + : queue_for(_queue_for), eof(false), failed(false) { + for (auto& nb : _neighbors) { + neighbors.emplace_back(nb.top_block_id(), nb.outmsg_root, nb.is_disabled()); + } + init(); +} + void OutputQueueMerger::init() { common_pfx.bits().store_int(queue_for.workchain, 32); int l = queue_for.pfx_len(); td::bitstring::bits_store_long_top(common_pfx.bits() + 32, queue_for.shard, l); common_pfx_len = 32 + l; int i = 0; - for (block::McShardDescr& neighbor : neighbors) { - if (!neighbor.is_disabled()) { - LOG(DEBUG) << "adding " << (neighbor.outmsg_root.is_null() ? "" : "non-") << "empty output queue for neighbor #" - << i << " (" << neighbor.blk_.to_str() << ")"; - add_root(i++, neighbor.outmsg_root); + for (Neighbor& neighbor : neighbors) { + if (!neighbor.disabled_) { + LOG(DEBUG) << "adding " << (neighbor.outmsg_root_.is_null() ? 
"" : "non-") << "empty output queue for neighbor #" + << i << " (" << neighbor.block_id_.to_str() << ")"; + add_root(i++, neighbor.outmsg_root_); } else { LOG(DEBUG) << "skipping output queue for disabled neighbor #" << i; i++; diff --git a/crypto/block/output-queue-merger.h b/crypto/block/output-queue-merger.h index bf3d8586..07533f24 100644 --- a/crypto/block/output-queue-merger.h +++ b/crypto/block/output-queue-merger.h @@ -51,12 +51,22 @@ struct OutputQueueMerger { bool unpack_node(td::ConstBitPtr key_pfx, int key_pfx_len, Ref node); bool split(MsgKeyValue& second); }; + struct Neighbor { + ton::BlockIdExt block_id_; + td::Ref outmsg_root_; + bool disabled_; + Neighbor() = default; + Neighbor(ton::BlockIdExt block_id, td::Ref outmsg_root, bool disabled = false) + : block_id_(block_id), outmsg_root_(std::move(outmsg_root)), disabled_(disabled) { + } + }; // ton::ShardIdFull queue_for; std::vector> msg_list; - std::vector neighbors; + std::vector neighbors; public: + OutputQueueMerger(ton::ShardIdFull _queue_for, std::vector _neighbors); OutputQueueMerger(ton::ShardIdFull _queue_for, std::vector _neighbors); bool is_eof() const { return eof; diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 62a48cab..7a907337 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -2536,6 +2536,31 @@ static td::uint32 get_public_libraries_count(const td::Ref& libraries) return count; } +/** + * Calculates the number of changes of public libraries in the dictionary. + * + * @param old_libraries The dictionary of account libraries before the transaction. + * @param new_libraries The dictionary of account libraries after the transaction. + * + * @returns The number of changed public libraries. + */ +static td::uint32 get_public_libraries_diff_count(const td::Ref& old_libraries, + const td::Ref& new_libraries) { + td::uint32 count = 0; + vm::Dictionary dict1{old_libraries, 256}; + vm::Dictionary dict2{new_libraries, 256}; + dict1.scan_diff(dict2, [&](td::ConstBitPtr key, int n, Ref val1, Ref val2) -> bool { + CHECK(n == 256); + bool is_public1 = val1.not_null() && block::is_public_library(key, val1); + bool is_public2 = val2.not_null() && block::is_public_library(key, val2); + if (is_public1 != is_public2) { + ++count; + } + return true; + }); + return count; +} + /** * Checks that the new account state fits in the limits. * This function is not called for special accounts. 
@@ -2979,14 +3004,14 @@ bool Transaction::serialize() { vm::load_cell_slice(root).print_rec(std::cerr); } - if (!block::gen::t_Transaction.validate_ref(root)) { + if (!block::gen::t_Transaction.validate_ref(4096, root)) { LOG(ERROR) << "newly-generated transaction failed to pass automated validation:"; vm::load_cell_slice(root).print_rec(std::cerr); block::gen::t_Transaction.print_ref(std::cerr, root); root.clear(); return false; } - if (!block::tlb::t_Transaction.validate_ref(root)) { + if (!block::tlb::t_Transaction.validate_ref(4096, root)) { LOG(ERROR) << "newly-generated transaction failed to pass hand-written validation:"; vm::load_cell_slice(root).print_rec(std::cerr); block::gen::t_Transaction.print_ref(std::cerr, root); @@ -3187,8 +3212,12 @@ bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) blimst.add_account(is_first))) { return false; } - if (account.is_masterchain() && (was_frozen || was_deleted)) { - blimst.extra_library_diff += get_public_libraries_count(account.orig_library); + if (account.is_masterchain()) { + if (was_frozen || was_deleted) { + blimst.public_library_diff += get_public_libraries_count(account.orig_library); + } else { + blimst.public_library_diff += get_public_libraries_diff_count(account.orig_library, new_library); + } } } return true; diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp index 27327493..d98c296c 100644 --- a/validator-engine/validator-engine.cpp +++ b/validator-engine/validator-engine.cpp @@ -3397,6 +3397,19 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getShardO promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "no such block"))); return; } + if (!dest) { + td::actor::send_closure( + manager, &ton::validator::ValidatorManagerInterface::get_out_msg_queue_size, handle->id(), + [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_value(create_control_query_error(R.move_as_error_prefix("failed to get queue size: "))); + } else { + promise.set_value(ton::create_serialize_tl_object( + R.move_as_ok())); + } + }); + return; + } td::actor::send_closure( manager, &ton::validator::ValidatorManagerInterface::get_shard_state_from_db, handle, [=, promise = std::move(promise)](td::Result> R) mutable { diff --git a/validator/CMakeLists.txt b/validator/CMakeLists.txt index 068569de..f858e6ff 100644 --- a/validator/CMakeLists.txt +++ b/validator/CMakeLists.txt @@ -52,6 +52,7 @@ set(VALIDATOR_HEADERS invariants.hpp import-db-slice.hpp + queue-size-counter.hpp manager-disk.h manager-disk.hpp @@ -77,6 +78,7 @@ set(VALIDATOR_SOURCE validator-full-id.cpp validator-group.cpp validator-options.cpp + queue-size-counter.cpp downloaders/wait-block-data.cpp downloaders/wait-block-state.cpp diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index b3063993..fe931cd6 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -60,7 +60,6 @@ class Collator final : public td::actor::Actor { bool preinit_complete{false}; bool is_key_block_{false}; bool block_full_{false}; - bool outq_cleanup_partial_{false}; bool inbound_queues_empty_{false}; bool libraries_changed_{false}; bool prev_key_block_exists_{false}; @@ -159,7 +158,6 @@ class Collator final : public td::actor::Actor { bool report_version_{false}; bool skip_topmsgdescr_{false}; bool skip_extmsg_{false}; - bool queue_too_big_{false}; bool short_dequeue_records_{false}; td::uint64 
overload_history_{0}, underload_history_{0}; td::uint64 block_size_estimate_{}; @@ -189,6 +187,7 @@ class Collator final : public td::actor::Actor { std::priority_queue, std::greater> new_msgs; std::pair last_proc_int_msg_, first_unproc_int_msg_; std::unique_ptr in_msg_dict, out_msg_dict, out_msg_queue_, sibling_out_msg_queue_; + td::uint32 out_msg_queue_size_ = 0; std::unique_ptr ihr_pending; std::shared_ptr processed_upto_, sibling_processed_upto_; std::unique_ptr block_create_stats_; @@ -227,6 +226,7 @@ class Collator final : public td::actor::Actor { bool fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton::ShardIdFull& owner); bool fix_processed_upto(block::MsgProcessedUptoCollection& upto); void got_neighbor_out_queue(int i, td::Result> res); + void got_out_queue_size(size_t i, td::Result res); bool adjust_shard_config(); bool store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees, const block::CurrencyCollection& created); @@ -260,6 +260,7 @@ class Collator final : public td::actor::Actor { bool check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev); bool check_this_shard_mc_info(); bool request_neighbor_msg_queues(); + bool request_out_msg_queue_size(); void update_max_lt(ton::LogicalTime lt); bool is_masterchain() const { return shard_.is_masterchain(); diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 2e4dfa1d..2f4a002a 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -44,6 +44,12 @@ namespace validator { using td::Ref; using namespace std::literals::string_literals; +// Don't increase MERGE_MAX_QUEUE_LIMIT too much: merging requires cleaning the whole queue in out_msg_queue_cleanup +static const td::uint32 FORCE_SPLIT_QUEUE_SIZE = 4096; +static const td::uint32 SPLIT_MAX_QUEUE_SIZE = 100000; +static const td::uint32 MERGE_MAX_QUEUE_SIZE = 2047; +static const td::uint32 SKIP_EXTERNALS_QUEUE_SIZE = 8000; + #define DBG(__n) dbg(__n)&& #define DSTART int __dcnt = 0; #define DEB DBG(++__dcnt) @@ -790,6 +796,26 @@ bool Collator::request_neighbor_msg_queues() { return true; } +/** + * Requests the size of the outbound message queue from the previous state(s). + * +* @returns True if the request was successful, false otherwise. + */ +bool Collator::request_out_msg_queue_size() { + if (after_split_) { + // If block is after split, the size is calculated during split (see Collator::split_last_state) + return true; + } + for (size_t i = 0; i < prev_blocks.size(); ++i) { + ++pending; + send_closure_later(manager, &ValidatorManager::get_out_msg_queue_size, prev_blocks[i], + [self = get_self(), i](td::Result res) { + td::actor::send_closure(std::move(self), &Collator::got_out_queue_size, i, std::move(res)); + }); + } + return true; +} + /** * Handles the result of obtaining the outbound queue for a neighbor. * @@ -854,6 +880,27 @@ void Collator::got_neighbor_out_queue(int i, td::Result> res) check_pending(); } +/** + * Handles the result of obtaining the size of the outbound message queue. + * + * If the block is after merge then the two sizes are added. + * + * @param i The index of the previous block (0 or 1). + * @param res The result object containing the size of the queue. 
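+ * @note On error the collation is aborted via fatal_error(); otherwise the reported size is added to
+ * out_msg_queue_size_ and check_pending() resumes collation once all previous blocks have answered.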
+ */ +void Collator::got_out_queue_size(size_t i, td::Result res) { + --pending; + if (res.is_error()) { + fatal_error( + res.move_as_error_prefix(PSTRING() << "failed to get message queue size from prev block #" << i << ": ")); + return; + } + td::uint32 size = res.move_as_ok(); + LOG(DEBUG) << "got outbound queue size from prev block #" << i << ": " << size; + out_msg_queue_size_ += size; + check_pending(); +} + /** * Unpacks and merges the states of two previous blocks. * Used if the block is after_merge. @@ -972,7 +1019,7 @@ bool Collator::split_last_state(block::ShardState& ss) { return fatal_error(res2.move_as_error()); } sibling_processed_upto_ = res2.move_as_ok(); - auto res3 = ss.split(shard_); + auto res3 = ss.split(shard_, &out_msg_queue_size_); if (res3.is_error()) { return fatal_error(std::move(res3)); } @@ -1449,6 +1496,9 @@ bool Collator::do_preinit() { if (!request_neighbor_msg_queues()) { return false; } + if (!request_out_msg_queue_size()) { + return false; + } return true; } @@ -1824,7 +1874,6 @@ bool Collator::init_utime() { // Extend collator timeout if previous block is too old td::Timestamp new_timeout = td::Timestamp::in(std::min(30.0, (td::Clocks::system() - (double)prev_now_) / 2)); if (timeout < new_timeout) { - double add = new_timeout.at() - timeout.at(); timeout = new_timeout; alarm_timestamp() = timeout; } @@ -2174,95 +2223,144 @@ bool Collator::out_msg_queue_cleanup() { block::gen::t_OutMsgQueue.print(std::cerr, *rt); rt->print_rec(std::cerr); } - for (const auto& nb : neighbors_) { - if (!nb.is_disabled() && (!nb.processed_upto || !nb.processed_upto->can_check_processed())) { - return fatal_error(-667, PSTRING() << "internal error: no info for checking processed messages from neighbor " - << nb.blk_.to_str()); - } - } - auto queue_root = out_msg_queue_->get_root_cell(); - if (queue_root.is_null()) { - LOG(DEBUG) << "out_msg_queue is empty"; - return true; - } - auto old_out_msg_queue = std::make_unique(queue_root, 352, block::tlb::aug_OutMsgQueue); + if (after_merge_) { + // We need to clean the whole queue after merge + // Queue is not too big, see const MERGE_MAX_QUEUE_SIZE + for (const auto& nb : neighbors_) { + if (!nb.is_disabled() && (!nb.processed_upto || !nb.processed_upto->can_check_processed())) { + return fatal_error(-667, PSTRING() << "internal error: no info for checking processed messages from neighbor " + << nb.blk_.to_str()); + } + } + td::uint32 deleted = 0; + auto res = out_msg_queue_->filter([&](vm::CellSlice& cs, td::ConstBitPtr key, int n) -> int { + assert(n == 352); + block::EnqueuedMsgDescr enq_msg_descr; + unsigned long long created_lt; + if (!(cs.fetch_ulong_bool(64, created_lt) // augmentation + && enq_msg_descr.unpack(cs) // unpack EnqueuedMsg + && enq_msg_descr.check_key(key) // check key + && enq_msg_descr.lt_ == created_lt)) { + LOG(ERROR) << "cannot unpack EnqueuedMsg with key " << key.to_hex(n); + return -1; + } + LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_; + bool delivered = false; + ton::LogicalTime deliver_lt = 0; + for (const auto& neighbor : neighbors_) { + // could look up neighbor with shard containing enq_msg_descr.next_prefix more efficiently + // (instead of checking all neighbors) + if (!neighbor.is_disabled() && neighbor.processed_upto->already_processed(enq_msg_descr)) { + delivered = true; + deliver_lt = neighbor.end_lt(); + break; + } + } + if (delivered) { + ++deleted; + 
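+        // keep the in-memory queue size counter in sync with the dictionary before dequeuing the entry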
CHECK(out_msg_queue_size_ > 0); + --out_msg_queue_size_; + LOG(DEBUG) << "outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex() + << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ << " has been already delivered, dequeueing"; + if (!dequeue_message(std::move(enq_msg_descr.msg_env_), deliver_lt)) { + fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") by inserting a msg_export_deq record"); + return -1; + } + register_out_msg_queue_op(); + if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { + block_full_ = true; + } + } + return !delivered; + }); + LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue after merge, remaining queue size is " + << out_msg_queue_size_; + if (res < 0) { + return fatal_error("error scanning/updating OutMsgQueue"); + } + } else { + std::vector> queue_parts; - int deleted = 0; - int total = 0; - bool fail = false; - old_out_msg_queue->check_for_each([&](Ref value, td::ConstBitPtr key, int n) -> bool { - ++total; - assert(n == 352); - vm::CellSlice& cs = value.write(); - // LOG(DEBUG) << "key is " << key.to_hex(n); - if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) { - LOG(WARNING) << "cleaning up outbound queue takes too long, ending"; - outq_cleanup_partial_ = true; - return false; // retain all remaining outbound queue entries including this one without processing + block::OutputQueueMerger::Neighbor this_queue{BlockIdExt{new_id} /* block id is only used for logs */, + out_msg_queue_->get_root_cell()}; + for (const auto& nb : neighbors_) { + if (nb.is_disabled()) { + continue; + } + if (!nb.processed_upto || !nb.processed_upto->can_check_processed()) { + return fatal_error(-667, PSTRING() << "internal error: no info for checking processed messages from neighbor " + << nb.blk_.to_str()); + } + queue_parts.emplace_back(block::OutputQueueMerger{nb.shard(), {this_queue}}, &nb); } - if (block_full_) { - LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially"; - outq_cleanup_partial_ = true; - return false; // retain all remaining outbound queue entries including this one without processing - } - block::EnqueuedMsgDescr enq_msg_descr; - unsigned long long created_lt; - if (!(cs.fetch_ulong_bool(64, created_lt) // augmentation - && enq_msg_descr.unpack(cs) // unpack EnqueuedMsg - && enq_msg_descr.check_key(key) // check key - && enq_msg_descr.lt_ == created_lt)) { - LOG(ERROR) << "cannot unpack EnqueuedMsg with key " << key.to_hex(n); - fail = true; - return false; - } - LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," - << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_; - bool delivered = false; - ton::LogicalTime deliver_lt = 0; - for (const auto& neighbor : neighbors_) { - // could look up neighbor with shard containing enq_msg_descr.next_prefix more efficiently - // (instead of checking all neighbors) - if (!neighbor.is_disabled() && neighbor.processed_upto->already_processed(enq_msg_descr)) { - delivered = true; - deliver_lt = neighbor.end_lt(); + + size_t i = 0; + td::uint32 deleted = 0; + while (!queue_parts.empty()) { + if (block_full_) { + LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially"; break; } - } - if (delivered) { - LOG(DEBUG) << "outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex() - << ") 
enqueued_lt=" << enq_msg_descr.enqueued_lt_ << " has been already delivered, dequeueing"; - ++deleted; - out_msg_queue_->lookup_delete_with_extra(key, n); - if (!dequeue_message(std::move(enq_msg_descr.msg_env_), deliver_lt)) { - fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," - << enq_msg_descr.hash_.to_hex() << ") by inserting a msg_export_deq record"); - fail = true; - return false; + if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) { + LOG(WARNING) << "cleaning up outbound queue takes too long, ending"; + break; } - register_out_msg_queue_op(); - if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { - block_full_ = true; + if (i == queue_parts.size()) { + i = 0; } + auto& queue = queue_parts.at(i).first; + auto nb = queue_parts.at(i).second; + auto kv = queue.extract_cur(); + if (kv) { + block::EnqueuedMsgDescr enq_msg_descr; + if (!(enq_msg_descr.unpack(kv->msg.write()) // unpack EnqueuedMsg + && enq_msg_descr.check_key(kv->key.cbits()) // check key + )) { + return fatal_error(PSTRING() << "error scanning/updating OutMsgQueue: cannot unpack EnqueuedMsg with key " + << kv->key.to_hex()); + } + if (nb->processed_upto->already_processed(enq_msg_descr)) { + LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ + << ": message has been already delivered, dequeueing"; + ++deleted; + CHECK(out_msg_queue_size_ > 0); + --out_msg_queue_size_; + out_msg_queue_->lookup_delete_with_extra(kv->key.cbits(), kv->key_len); + if (!dequeue_message(std::move(enq_msg_descr.msg_env_), nb->end_lt())) { + return fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ + << "," << enq_msg_descr.hash_.to_hex() + << ") by inserting a msg_export_deq record"); + } + register_out_msg_queue_op(); + if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { + block_full_ = true; + } + queue.next(); + ++i; + continue; + } else { + LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ + << ": message has not been delivered"; + } + } + LOG(DEBUG) << "no more unprocessed messages to shard " << nb->shard().to_str(); + std::swap(queue_parts[i], queue_parts.back()); + queue_parts.pop_back(); } - return true; - }, false, true /* random order */); - LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue, processed " << total << " messages in total"; - if (fail) { - return fatal_error("error scanning/updating OutMsgQueue"); + LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue, remaining queue size is " + << out_msg_queue_size_; } - if (outq_cleanup_partial_ || total > 8000) { - LOG(INFO) << "out_msg_queue too big, skipping importing external messages"; - skip_extmsg_ = true; - queue_too_big_ = true; - } - auto rt = out_msg_queue_->get_root(); if (verbosity >= 2) { + auto rt = out_msg_queue_->get_root(); std::cerr << "new out_msg_queue is "; block::gen::t_OutMsgQueue.print(std::cerr, *rt); rt->print_rec(std::cerr); } - // CHECK(block::gen::t_OutMsgQueue.validate_upto(100000, *rt)); // DEBUG, comment later if SLOW return register_out_msg_queue_op(true); } @@ -3047,6 +3145,7 @@ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_ try { LOG(DEBUG) << "inserting into outbound queue message with (lt,key)=(" << start_lt << "," << key.to_hex() 
<< ")"; ok = out_msg_queue_->set_builder(key.bits(), 352, cb, vm::Dictionary::SetMode::Add); + ++out_msg_queue_size_; } catch (vm::VmError) { ok = false; } @@ -3069,6 +3168,8 @@ bool Collator::delete_out_msg_queue_msg(td::ConstBitPtr key) { try { LOG(DEBUG) << "deleting from outbound queue message with key=" << key.to_hex(352); queue_rec = out_msg_queue_->lookup_delete(key, 352); + CHECK(out_msg_queue_size_ > 0); + --out_msg_queue_size_; } catch (vm::VmError err) { LOG(ERROR) << "error deleting from out_msg_queue dictionary: " << err.get_msg(); } @@ -3309,8 +3410,9 @@ bool Collator::process_inbound_external_messages() { LOG(INFO) << "skipping processing of inbound external messages"; return true; } - if (out_msg_queue_->get_root_cell().not_null() && out_msg_queue_->get_root_cell()->get_depth() > 12) { - LOG(INFO) << "skipping processing of inbound external messages: out msg queue is too big"; + if (out_msg_queue_size_ > SKIP_EXTERNALS_QUEUE_SIZE) { + LOG(INFO) << "skipping processing of inbound external messages because out_msg_queue is too big (" + << out_msg_queue_size_ << " > " << SKIP_EXTERNALS_QUEUE_SIZE << ")"; return true; } bool full = !block_limit_status_->fits(block::ParamLimits::cl_soft); @@ -3550,6 +3652,7 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema LOG(DEBUG) << "inserting into outbound queue a new message with (lt,key)=(" << start_lt << "," << key.to_hex() << ")"; ok = out_msg_queue_->set_builder(key.bits(), 352, cb, vm::Dictionary::SetMode::Add); + ++out_msg_queue_size_; } catch (vm::VmError) { ok = false; } @@ -4158,6 +4261,7 @@ static int history_weight(td::uint64 history) { * @returns True if the check is successful. */ bool Collator::check_block_overload() { + LOG(INFO) << "final out_msg_queue size is " << out_msg_queue_size_; overload_history_ <<= 1; underload_history_ <<= 1; block_size_estimate_ = block_limit_status_->estimate_block_size(); @@ -4166,18 +4270,32 @@ bool Collator::check_block_overload() { << " size_estimate=" << block_size_estimate_; auto cl = block_limit_status_->classify(); if (cl <= block::ParamLimits::cl_underload) { - if (queue_too_big_) { - LOG(INFO) << "block is underloaded, but don't set underload history because out msg queue is big"; + if (out_msg_queue_size_ > MERGE_MAX_QUEUE_SIZE) { + LOG(INFO) + << "block is underloaded, but don't set underload history because out_msg_queue size is too big to merge (" + << out_msg_queue_size_ << " > " << MERGE_MAX_QUEUE_SIZE << ")"; } else { underload_history_ |= 1; LOG(INFO) << "block is underloaded"; } } else if (cl >= block::ParamLimits::cl_soft) { - overload_history_ |= 1; - LOG(INFO) << "block is overloaded (category " << cl << ")"; + if (out_msg_queue_size_ > SPLIT_MAX_QUEUE_SIZE) { + LOG(INFO) << "block is overloaded (category " << cl + << "), but don't set overload history because out_msg_queue size is too big to split (" + << out_msg_queue_size_ << " > " << SPLIT_MAX_QUEUE_SIZE << ")"; + } else { + overload_history_ |= 1; + LOG(INFO) << "block is overloaded (category " << cl << ")"; + } } else { LOG(INFO) << "block is loaded normally"; } + if (!(overload_history_ & 1) && out_msg_queue_size_ >= FORCE_SPLIT_QUEUE_SIZE && + out_msg_queue_size_ <= SPLIT_MAX_QUEUE_SIZE) { + overload_history_ |= 1; + LOG(INFO) << "setting overload history because out_msg_queue reached force split limit (" << out_msg_queue_size_ + << " >= " << FORCE_SPLIT_QUEUE_SIZE << ")"; + } if (collator_settings & 1) { LOG(INFO) << "want_split manually set"; want_split_ = true; diff --git 
a/validator/manager-disk.hpp b/validator/manager-disk.hpp index 2745e02e..1c3f4f5b 100644 --- a/validator/manager-disk.hpp +++ b/validator/manager-disk.hpp @@ -23,6 +23,7 @@ #include "validator-group.hpp" #include "manager-init.h" #include "manager-disk.h" +#include "queue-size-counter.hpp" #include #include @@ -376,6 +377,13 @@ class ValidatorManagerImpl : public ValidatorManager { void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) override { UNREACHABLE(); } + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + if (queue_size_counter_.empty()) { + queue_size_counter_ = + td::actor::create_actor("queuesizecounter", td::Ref{}, actor_id(this)); + } + td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); + } private: PublicKeyHash local_id_; @@ -393,6 +401,7 @@ class ValidatorManagerImpl : public ValidatorManager { int pending_new_shard_block_descr_{0}; std::vector>>> waiting_new_shard_block_descr_; + td::actor::ActorOwn queue_size_counter_; void update_shards(); void update_shard_blocks(); diff --git a/validator/manager-hardfork.hpp b/validator/manager-hardfork.hpp index 6a145191..675c2304 100644 --- a/validator/manager-hardfork.hpp +++ b/validator/manager-hardfork.hpp @@ -23,6 +23,7 @@ #include "validator-group.hpp" #include "manager-init.h" #include "manager-hardfork.h" +#include "queue-size-counter.hpp" #include #include @@ -437,6 +438,13 @@ class ValidatorManagerImpl : public ValidatorManager { void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) override { UNREACHABLE(); } + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + if (queue_size_counter_.empty()) { + queue_size_counter_ = + td::actor::create_actor("queuesizecounter", td::Ref{}, actor_id(this)); + } + td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); + } private: td::Ref opts_; @@ -445,6 +453,7 @@ class ValidatorManagerImpl : public ValidatorManager { std::string db_root_; ShardIdFull shard_to_generate_; BlockIdExt block_to_generate_; + td::actor::ActorOwn queue_size_counter_; }; } // namespace validator diff --git a/validator/manager.cpp b/validator/manager.cpp index 8caed0f6..54d272bc 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -2277,7 +2277,15 @@ void ValidatorManagerImpl::allow_block_info_gc(BlockIdExt block_id, td::Promise< void ValidatorManagerImpl::got_next_gc_masterchain_handle(BlockHandle handle) { CHECK(gc_advancing_); auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result> R) { - R.ensure(); + if (R.is_error()) { + if (R.error().code() == ErrorCode::timeout) { + LOG(ERROR) << "Failed to get gc masterchain state, retrying: " << R.move_as_error(); + td::actor::send_closure(SelfId, &ValidatorManagerImpl::got_next_gc_masterchain_handle, std::move(handle)); + } else { + LOG(FATAL) << "Failed to get gc masterchain state: " << R.move_as_error(); + } + return; + } td::actor::send_closure(SelfId, &ValidatorManagerImpl::got_next_gc_masterchain_state, std::move(handle), td::Ref{R.move_as_ok()}); }); diff --git a/validator/manager.hpp b/validator/manager.hpp index ccd85423..9f51cc27 100644 --- a/validator/manager.hpp +++ b/validator/manager.hpp @@ -28,6 +28,7 @@ #include "state-serializer.hpp" #include "rldp/rldp.h" #include "token-manager.h" +#include "queue-size-counter.hpp" #include #include @@ -548,6 
+549,18 @@ class ValidatorManagerImpl : public ValidatorManager {
   void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) override;
 
+  void get_out_msg_queue_size(BlockIdExt block_id, td::Promise<td::uint32> promise) override {
+    if (queue_size_counter_.empty()) {
+      if (last_masterchain_state_.is_null()) {
+        promise.set_error(td::Status::Error(ErrorCode::notready, "not ready"));
+        return;
+      }
+      queue_size_counter_ = td::actor::create_actor<QueueSizeCounter>("queuesizecounter",
+                                                                      last_masterchain_state_, actor_id(this));
+    }
+    td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise));
+  }
+
 private:
  td::Timestamp resend_shard_blocks_at_;
  td::Timestamp check_waiters_at_;
@@ -612,6 +625,7 @@ class ValidatorManagerImpl : public ValidatorManager {
 
 private:
  std::map> shard_client_waiters_;
+  td::actor::ActorOwn<QueueSizeCounter> queue_size_counter_;
 };
 
 }  // namespace validator
 
diff --git a/validator/queue-size-counter.cpp b/validator/queue-size-counter.cpp
new file mode 100644
index 00000000..4780f202
--- /dev/null
+++ b/validator/queue-size-counter.cpp
@@ -0,0 +1,301 @@
+/*
+    This file is part of TON Blockchain Library.
+
+    TON Blockchain Library is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Lesser General Public License as published by
+    the Free Software Foundation, either version 2 of the License, or
+    (at your option) any later version.
+
+    TON Blockchain Library is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Lesser General Public License for more details.
+
+    You should have received a copy of the GNU Lesser General Public License
+    along with TON Blockchain Library.  If not, see <http://www.gnu.org/licenses/>.
+*/ +#include "queue-size-counter.hpp" +#include "block/block-auto.h" +#include "block/block-parse.h" +#include "common/delay.h" +#include "td/actor/MultiPromise.h" +#include "td/utils/Random.h" + +namespace ton::validator { + +static td::Result calc_queue_size(const td::Ref &state) { + td::uint32 size = 0; + TRY_RESULT(outq_descr, state->message_queue()); + block::gen::OutMsgQueueInfo::Record qinfo; + if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) { + return td::Status::Error("invalid message queue"); + } + vm::AugmentedDictionary queue{qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; + bool ok = queue.check_for_each([&](td::Ref, td::ConstBitPtr, int) -> bool { + ++size; + return true; + }); + if (!ok) { + return td::Status::Error("invalid message queue dict"); + } + return size; +} + +static td::Result recalc_queue_size(const td::Ref &state, const td::Ref &prev_state, + td::uint32 prev_size) { + TRY_RESULT(outq_descr, state->message_queue()); + block::gen::OutMsgQueueInfo::Record qinfo; + if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) { + return td::Status::Error("invalid message queue"); + } + vm::AugmentedDictionary queue{qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; + + TRY_RESULT(prev_outq_descr, prev_state->message_queue()); + block::gen::OutMsgQueueInfo::Record prev_qinfo; + if (!tlb::unpack_cell(prev_outq_descr->root_cell(), prev_qinfo)) { + return td::Status::Error("invalid message queue"); + } + vm::AugmentedDictionary prev_queue{prev_qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; + td::uint32 add = 0, rem = 0; + bool ok = prev_queue.scan_diff( + queue, [&](td::ConstBitPtr, int, td::Ref prev_val, td::Ref new_val) -> bool { + if (prev_val.not_null()) { + ++rem; + } + if (new_val.not_null()) { + ++add; + } + return true; + }); + if (!ok) { + return td::Status::Error("invalid message queue dict"); + } + if (prev_size + add < rem) { + return td::Status::Error("negative value"); + } + return prev_size + add - rem; +} + +void QueueSizeCounter::start_up() { + if (init_masterchain_state_.is_null()) { + // Used in manager-hardfork or manager-disk + simple_mode_ = true; + return; + } + current_seqno_ = init_masterchain_state_->get_seqno(); + process_top_shard_blocks_cont(init_masterchain_state_, true); + init_masterchain_state_ = {}; + alarm(); +} + +void QueueSizeCounter::get_queue_size(BlockIdExt block_id, td::Promise promise) { + get_queue_size_ex(block_id, simple_mode_ || is_block_too_old(block_id), std::move(promise)); +} + +void QueueSizeCounter::get_queue_size_ex(ton::BlockIdExt block_id, bool calc_whole, td::Promise promise) { + Entry &entry = results_[block_id]; + if (entry.done_) { + promise.set_result(entry.queue_size_); + return; + } + entry.promises_.push_back(std::move(promise)); + if (entry.started_) { + return; + } + entry.started_ = true; + entry.calc_whole_ = calc_whole; + td::actor::send_closure(manager_, &ValidatorManager::get_block_handle, block_id, true, + [SelfId = actor_id(this), block_id, manager = manager_](td::Result R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, block_id, R.move_as_error()); + return; + } + BlockHandle handle = R.move_as_ok(); + td::actor::send_closure( + manager, &ValidatorManager::wait_block_state, handle, 0, td::Timestamp::in(10.0), + [SelfId, handle](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, handle->id(), + R.move_as_error()); + return; + } + 
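+              // state resolved: continue in get_queue_size_cont, which either walks the
+              // whole queue or applies a diff against the previous block's known size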
td::actor::send_closure(SelfId, &QueueSizeCounter::get_queue_size_cont, + std::move(handle), R.move_as_ok()); + }); + }); +} + +void QueueSizeCounter::get_queue_size_cont(BlockHandle handle, td::Ref state) { + Entry &entry = results_[handle->id()]; + CHECK(entry.started_); + bool calc_whole = entry.calc_whole_ || handle->id().seqno() == 0; + if (!calc_whole) { + CHECK(handle->inited_prev()); + auto prev_blocks = handle->prev(); + bool after_split = prev_blocks.size() == 1 && handle->id().shard_full() != prev_blocks[0].shard_full(); + bool after_merge = prev_blocks.size() == 2; + calc_whole = after_split || after_merge; + } + if (calc_whole) { + auto r_size = calc_queue_size(state); + if (r_size.is_error()) { + on_error(handle->id(), r_size.move_as_error()); + return; + } + entry.done_ = true; + entry.queue_size_ = r_size.move_as_ok(); + for (auto &promise : entry.promises_) { + promise.set_result(entry.queue_size_); + } + entry.promises_.clear(); + return; + } + + auto prev_block_id = handle->one_prev(true); + get_queue_size(prev_block_id, [=, SelfId = actor_id(this), manager = manager_](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, state->get_block_id(), R.move_as_error()); + return; + } + td::uint32 prev_size = R.move_as_ok(); + td::actor::send_closure( + manager, &ValidatorManager::wait_block_state_short, prev_block_id, 0, td::Timestamp::in(10.0), + [=](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, state->get_block_id(), R.move_as_error()); + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::get_queue_size_cont2, state, R.move_as_ok(), prev_size); + }); + }); +} + +void QueueSizeCounter::get_queue_size_cont2(td::Ref state, td::Ref prev_state, + td::uint32 prev_size) { + BlockIdExt block_id = state->get_block_id(); + Entry &entry = results_[block_id]; + CHECK(entry.started_); + auto r_size = recalc_queue_size(state, prev_state, prev_size); + if (r_size.is_error()) { + on_error(block_id, r_size.move_as_error()); + return; + } + entry.done_ = true; + entry.queue_size_ = r_size.move_as_ok(); + for (auto &promise : entry.promises_) { + promise.set_result(entry.queue_size_); + } + entry.promises_.clear(); +} + +void QueueSizeCounter::on_error(ton::BlockIdExt block_id, td::Status error) { + auto it = results_.find(block_id); + if (it == results_.end()) { + return; + } + Entry &entry = it->second; + CHECK(!entry.done_); + for (auto &promise : entry.promises_) { + promise.set_error(error.clone()); + } + results_.erase(it); +} + +void QueueSizeCounter::process_top_shard_blocks() { + LOG(DEBUG) << "QueueSizeCounter::process_top_shard_blocks seqno=" << current_seqno_; + td::actor::send_closure( + manager_, &ValidatorManager::get_block_by_seqno_from_db, AccountIdPrefixFull{masterchainId, 0}, current_seqno_, + [SelfId = actor_id(this), manager = manager_](td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get masterchain block id: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks); }, + td::Timestamp::in(5.0)); + return; + } + td::actor::send_closure( + manager, &ValidatorManager::wait_block_state_short, R.ok()->id(), 0, td::Timestamp::in(10.0), + [=](td::Result> R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get masterchain state: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks); }, + 
td::Timestamp::in(5.0)); + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks_cont, + td::Ref(R.move_as_ok()), false); + }); + }); +} + +void QueueSizeCounter::process_top_shard_blocks_cont(td::Ref state, bool init) { + LOG(DEBUG) << "QueueSizeCounter::process_top_shard_blocks_cont seqno=" << current_seqno_ << " init=" << init; + td::MultiPromise mp; + auto ig = mp.init_guard(); + last_top_blocks_.clear(); + last_top_blocks_.push_back(state->get_block_id()); + for (auto &shard : state->get_shards()) { + last_top_blocks_.push_back(shard->top_block_id()); + } + for (const BlockIdExt &block_id : last_top_blocks_) { + get_queue_size_ex_retry(block_id, init, ig.get_promise()); + } + ig.add_promise([SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks_finish); + }); + if (init) { + init_top_blocks_ = last_top_blocks_; + } +} + +void QueueSizeCounter::get_queue_size_ex_retry(BlockIdExt block_id, bool calc_whole, td::Promise promise) { + get_queue_size_ex(block_id, calc_whole, + [=, promise = std::move(promise), SelfId = actor_id(this)](td::Result R) mutable { + if (R.is_error()) { + LOG(WARNING) << "Failed to calculate queue size for block " << block_id.to_str() << ": " + << R.move_as_error(); + delay_action( + [=, promise = std::move(promise)]() mutable { + td::actor::send_closure(SelfId, &QueueSizeCounter::get_queue_size_ex_retry, block_id, + calc_whole, std::move(promise)); + }, + td::Timestamp::in(5.0)); + return; + } + promise.set_result(td::Unit()); + }); +} + +void QueueSizeCounter::process_top_shard_blocks_finish() { + ++current_seqno_; + wait_shard_client(); +} + +void QueueSizeCounter::wait_shard_client() { + LOG(DEBUG) << "QueueSizeCounter::wait_shard_client seqno=" << current_seqno_; + td::actor::send_closure( + manager_, &ValidatorManager::wait_shard_client_state, current_seqno_, td::Timestamp::in(60.0), + [SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + delay_action([=]() mutable { td::actor::send_closure(SelfId, &QueueSizeCounter::wait_shard_client); }, + td::Timestamp::in(5.0)); + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks); + }); +} + +void QueueSizeCounter::alarm() { + for (auto it = results_.begin(); it != results_.end();) { + if (it->second.done_ && is_block_too_old(it->first)) { + it = results_.erase(it); + } else { + ++it; + } + } + alarm_timestamp() = td::Timestamp::in(td::Random::fast(20.0, 40.0)); +} + +} // namespace ton::validator \ No newline at end of file diff --git a/validator/queue-size-counter.hpp b/validator/queue-size-counter.hpp new file mode 100644 index 00000000..fabb0cec --- /dev/null +++ b/validator/queue-size-counter.hpp @@ -0,0 +1,82 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#pragma once +#include "interfaces/validator-manager.h" + +namespace ton::validator { + +class QueueSizeCounter : public td::actor::Actor { + public: + QueueSizeCounter(td::Ref last_masterchain_state, td::actor::ActorId manager) + : init_masterchain_state_(last_masterchain_state), manager_(std::move(manager)) { + } + + void start_up() override; + void get_queue_size(BlockIdExt block_id, td::Promise promise); + void alarm() override; + + private: + td::Ref init_masterchain_state_; + td::actor::ActorId manager_; + bool simple_mode_ = false; + + BlockSeqno current_seqno_ = 0; + std::vector init_top_blocks_; + std::vector last_top_blocks_; + + struct Entry { + bool started_ = false; + bool done_ = false; + bool calc_whole_ = false; + td::uint32 queue_size_ = 0; + std::vector> promises_; + }; + std::map results_; + + void get_queue_size_ex(BlockIdExt block_id, bool calc_whole, td::Promise promise); + void get_queue_size_cont(BlockHandle handle, td::Ref state); + void get_queue_size_cont2(td::Ref state, td::Ref prev_state, td::uint32 prev_size); + void on_error(BlockIdExt block_id, td::Status error); + + void process_top_shard_blocks(); + void process_top_shard_blocks_cont(td::Ref state, bool init = false); + void get_queue_size_ex_retry(BlockIdExt block_id, bool calc_whole, td::Promise promise); + void process_top_shard_blocks_finish(); + void wait_shard_client(); + + bool is_block_too_old(const BlockIdExt& block_id) const { + for (const BlockIdExt& top_block : last_top_blocks_) { + if (shard_intersects(block_id.shard_full(), top_block.shard_full())) { + if (block_id.seqno() + 100 < top_block.seqno()) { + return true; + } + break; + } + } + for (const BlockIdExt& init_top_block : init_top_blocks_) { + if (shard_intersects(block_id.shard_full(), init_top_block.shard_full())) { + if (block_id.seqno() < init_top_block.seqno()) { + return true; + } + break; + } + } + return false; + } +}; + +} // namespace ton::validator diff --git a/validator/validator.h b/validator/validator.h index 7cdea805..2fefb064 100644 --- a/validator/validator.h +++ b/validator/validator.h @@ -228,6 +228,8 @@ class ValidatorManagerInterface : public td::actor::Actor { virtual void prepare_perf_timer_stats(td::Promise> promise) = 0; virtual void add_perf_timer_stat(std::string name, double duration) = 0; + virtual void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) = 0; + }; } // namespace validator From 7a457ca2781bc2f69a90ac5e191942668c3170f7 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 13 Dec 2023 15:12:58 +0300 Subject: [PATCH 39/71] Fix linking error (#827) * Fix linking error --------- Co-authored-by: SpyCheese --- validator/CMakeLists.txt | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/validator/CMakeLists.txt b/validator/CMakeLists.txt index f858e6ff..4ecc865c 100644 --- a/validator/CMakeLists.txt +++ b/validator/CMakeLists.txt @@ -100,7 +100,8 @@ set(DISK_VALIDATOR_SOURCE validator-full-id.cpp validator-group.cpp validator-options.cpp - + queue-size-counter.cpp + downloaders/wait-block-data-disk.cpp downloaders/wait-block-state.cpp downloaders/wait-block-state-merge.cpp @@ -119,7 +120,8 @@ set(HARDFORK_VALIDATOR_SOURCE validator-full-id.cpp validator-group.cpp validator-options.cpp - + queue-size-counter.cpp + downloaders/wait-block-data-disk.cpp downloaders/wait-block-state.cpp downloaders/wait-block-state-merge.cpp From 1fc4a0faed069f6c68c84fa41288bdd2d8665128 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 13 Dec 2023 21:33:54 +0300 Subject: [PATCH 
40/71] Move low blockrate lt_limits to appropriate place (#828) * Fix setting lt_delta limits --------- Co-authored-by: SpyCheese --- validator/impl/collator.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 2f4a002a..86ee1cf4 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -709,9 +709,6 @@ bool Collator::unpack_last_mc_state() { return fatal_error(limits.move_as_error()); } block_limits_ = limits.move_as_ok(); - if (now_ > prev_now_ + 15 && block_limits_->lt_delta.hard() > 200) { - block_limits_->lt_delta = {20, 180, 200}; - } LOG(DEBUG) << "block limits: bytes [" << block_limits_->bytes.underload() << ", " << block_limits_->bytes.soft() << ", " << block_limits_->bytes.hard() << "]"; LOG(DEBUG) << "block limits: gas [" << block_limits_->gas.underload() << ", " << block_limits_->gas.soft() << ", " @@ -1433,6 +1430,9 @@ bool Collator::check_this_shard_mc_info() { bool Collator::init_block_limits() { CHECK(block_limits_); CHECK(state_usage_tree_); + if (now_ > prev_now_ + 15 && block_limits_->lt_delta.hard() > 200) { + block_limits_->lt_delta = {20, 180, 200}; + } block_limits_->usage_tree = state_usage_tree_.get(); block_limit_status_ = std::make_unique(*block_limits_); return true; From ace934ff35fd095296da9108d84f52cffca62473 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Tue, 19 Dec 2023 10:39:25 +0300 Subject: [PATCH 41/71] Adjust allowed time lag for last_liteserver_state + more verbose logs (#836) * Add logs to collator and validator * More logs to get_ext_messages, decrease verbosity level * Adjust allowed time lag for last_liteserver_state * Change verbosity of STATUS message --------- Co-authored-by: SpyCheese --- validator/impl/collator.cpp | 26 +++++++++++---------- validator/impl/fabric.cpp | 10 ++++---- validator/impl/validate-query.cpp | 13 +++++++---- validator/manager.cpp | 38 +++++++++++++++++++++++-------- validator/validator-group.cpp | 3 ++- 5 files changed, 58 insertions(+), 32 deletions(-) diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 86ee1cf4..1d277048 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -106,7 +106,7 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, UnixTime min_ts, BlockId * The results of these queries are handled by corresponding callback functions. */ void Collator::start_up() { - LOG(DEBUG) << "Collator for shard " << shard_.to_str() << " started"; + LOG(WARNING) << "Collator for shard " << shard_.to_str() << " started"; LOG(DEBUG) << "Previous block #1 is " << prev_blocks.at(0).to_str(); if (prev_blocks.size() > 1) { LOG(DEBUG) << "Previous block #2 is " << prev_blocks.at(1).to_str(); @@ -554,7 +554,7 @@ bool Collator::preprocess_prev_mc_state() { * @param res The retreived masterchain state. */ void Collator::after_get_mc_state(td::Result, BlockIdExt>> res) { - LOG(DEBUG) << "in Collator::after_get_mc_state()"; + LOG(WARNING) << "in Collator::after_get_mc_state()"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -589,7 +589,7 @@ void Collator::after_get_mc_state(td::Result, Bl * @param res The retrieved shard state. 
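 * @note Decrements pending; a failed state fetch aborts collation via fatal_error().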
*/ void Collator::after_get_shard_state(int idx, td::Result> res) { - LOG(DEBUG) << "in Collator::after_get_shard_state(" << idx << ")"; + LOG(WARNING) << "in Collator::after_get_shard_state(" << idx << ")"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -820,7 +820,6 @@ bool Collator::request_out_msg_queue_size() { * @param res The obtained outbound queue. */ void Collator::got_neighbor_out_queue(int i, td::Result> res) { - LOG(DEBUG) << "obtained outbound queue for neighbor #" << i; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -828,6 +827,7 @@ void Collator::got_neighbor_out_queue(int i, td::Result> res) } Ref outq_descr = res.move_as_ok(); block::McShardDescr& descr = neighbors_.at(i); + LOG(WARNING) << "obtained outbound queue for neighbor #" << i << " : " << descr.shard().to_str(); if (outq_descr->get_block_id() != descr.blk_) { LOG(DEBUG) << "outq_descr->id = " << outq_descr->get_block_id().to_str() << " ; descr.id = " << descr.blk_.to_str(); fatal_error( @@ -893,7 +893,7 @@ void Collator::got_out_queue_size(size_t i, td::Result res) { return; } td::uint32 size = res.move_as_ok(); - LOG(DEBUG) << "got outbound queue size from prev block #" << i << ": " << size; + LOG(WARNING) << "got outbound queue size from prev block #" << i << ": " << size; out_msg_queue_size_ += size; check_pending(); } @@ -1762,7 +1762,7 @@ bool Collator::register_shard_block_creators(std::vector creator_li */ bool Collator::try_collate() { if (!preinit_complete) { - LOG(DEBUG) << "running do_preinit()"; + LOG(WARNING) << "running do_preinit()"; if (!do_preinit()) { return fatal_error(-667, "error preinitializing data required by collator"); } @@ -2062,7 +2062,7 @@ bool Collator::do_collate() { // After do_collate started it will not be interrupted by timeout alarm_timestamp() = td::Timestamp::never(); - LOG(DEBUG) << "do_collate() : start"; + LOG(WARNING) << "do_collate() : start"; if (!fetch_config_params()) { return fatal_error("cannot fetch required configuration parameters from masterchain state"); } @@ -2276,8 +2276,8 @@ bool Collator::out_msg_queue_cleanup() { } return !delivered; }); - LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue after merge, remaining queue size is " - << out_msg_queue_size_; + LOG(WARNING) << "deleted " << deleted << " messages from out_msg_queue after merge, remaining queue size is " + << out_msg_queue_size_; if (res < 0) { return fatal_error("error scanning/updating OutMsgQueue"); } @@ -2352,8 +2352,8 @@ bool Collator::out_msg_queue_cleanup() { std::swap(queue_parts[i], queue_parts.back()); queue_parts.pop_back(); } - LOG(INFO) << "deleted " << deleted << " messages from out_msg_queue, remaining queue size is " - << out_msg_queue_size_; + LOG(WARNING) << "deleted " << deleted << " messages from out_msg_queue, remaining queue size is " + << out_msg_queue_size_; } if (verbosity >= 2) { auto rt = out_msg_queue_->get_root(); @@ -5035,7 +5035,7 @@ void Collator::return_block_candidate(td::Result saved) { fatal_error(std::move(err)); } else { CHECK(block_candidate); - LOG(INFO) << "sending new BlockCandidate to Promise"; + LOG(WARNING) << "sending new BlockCandidate to Promise"; main_promise(block_candidate->clone()); busy_ = false; stop(); @@ -5133,6 +5133,8 @@ void Collator::after_get_external_messages(td::Resulthash()); } } + LOG(WARNING) << "got " << vect.size() << " external messages from mempool, " << bad_ext_msgs_.size() + << " bad messages"; check_pending(); } diff --git a/validator/impl/fabric.cpp 
b/validator/impl/fabric.cpp index 23a03482..ede8d36d 100644 --- a/validator/impl/fabric.cpp +++ b/validator/impl/fabric.cpp @@ -202,10 +202,12 @@ void run_validate_query(ShardIdFull shard, UnixTime min_ts, BlockIdExt min_maste seqno = p.seqno(); } } - td::actor::create_actor( - PSTRING() << (is_fake ? "fakevalidate" : "validateblock") << shard.to_str() << ":" << (seqno + 1), shard, min_ts, - min_masterchain_block_id, std::move(prev), std::move(candidate), std::move(validator_set), std::move(manager), - timeout, std::move(promise), is_fake) + static std::atomic idx; + td::actor::create_actor(PSTRING() << (is_fake ? "fakevalidate" : "validateblock") << shard.to_str() + << ":" << (seqno + 1) << "#" << idx.fetch_add(1), + shard, min_ts, min_masterchain_block_id, std::move(prev), std::move(candidate), + std::move(validator_set), std::move(manager), timeout, std::move(promise), + is_fake) .release(); } diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index d9d03207..d5f489a3 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -234,6 +234,7 @@ bool ValidateQuery::fatal_error(std::string err_msg, int err_code) { */ void ValidateQuery::finish_query() { if (main_promise) { + LOG(WARNING) << "validate query done"; main_promise.set_result(now_); } stop(); @@ -252,7 +253,7 @@ void ValidateQuery::finish_query() { * Then the function also sends requests to the ValidatorManager to fetch blocks and shard stated. */ void ValidateQuery::start_up() { - LOG(INFO) << "validate query for " << block_candidate.id.to_str() << " started"; + LOG(WARNING) << "validate query for " << block_candidate.id.to_str() << " started"; alarm_timestamp() = timeout; rand_seed_.set_zero(); created_by_ = block_candidate.pubkey; @@ -667,7 +668,7 @@ bool ValidateQuery::extract_collated_data() { * @param res The result of the retrieval of the latest masterchain state. */ void ValidateQuery::after_get_latest_mc_state(td::Result, BlockIdExt>> res) { - LOG(DEBUG) << "in ValidateQuery::after_get_latest_mc_state()"; + LOG(WARNING) << "in ValidateQuery::after_get_latest_mc_state()"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -708,7 +709,7 @@ void ValidateQuery::after_get_latest_mc_state(td::Result> res) { - LOG(DEBUG) << "in ValidateQuery::after_get_mc_state() for " << mc_blkid_.to_str(); + LOG(WARNING) << "in ValidateQuery::after_get_mc_state() for " << mc_blkid_.to_str(); --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -752,7 +753,7 @@ void ValidateQuery::got_mc_handle(td::Result res) { * @param res The result of the shard state retrieval. */ void ValidateQuery::after_get_shard_state(int idx, td::Result> res) { - LOG(DEBUG) << "in ValidateQuery::after_get_shard_state(" << idx << ")"; + LOG(WARNING) << "in ValidateQuery::after_get_shard_state(" << idx << ")"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -1495,7 +1496,6 @@ bool ValidateQuery::request_neighbor_queues() { * @param res The obtained outbound queue. 
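 * @note A queue whose block id does not match the neighbor descriptor aborts validation via fatal_error().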
*/ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> res) { - LOG(DEBUG) << "obtained outbound queue for neighbor #" << i; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -1503,6 +1503,7 @@ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> } Ref outq_descr = res.move_as_ok(); block::McShardDescr& descr = neighbors_.at(i); + LOG(WARNING) << "obtained outbound queue for neighbor #" << i << " : " << descr.shard().to_str(); if (outq_descr->get_block_id() != descr.blk_) { LOG(DEBUG) << "outq_descr->id = " << outq_descr->get_block_id().to_str() << " ; descr.id = " << descr.blk_.to_str(); fatal_error( @@ -6187,6 +6188,7 @@ bool ValidateQuery::try_validate() { } try { if (!stage_) { + LOG(WARNING) << "try_validate stage 0"; if (!compute_prev_state()) { return fatal_error(-666, "cannot compute previous state"); } @@ -6216,6 +6218,7 @@ bool ValidateQuery::try_validate() { return true; } } + LOG(WARNING) << "try_validate stage 1"; LOG(INFO) << "running automated validity checks for block candidate " << id_.to_str(); if (!block::gen::t_Block.validate_ref(10000000, block_root_)) { return reject_query("block "s + id_.to_str() + " failed to pass automated validity checks"); diff --git a/validator/manager.cpp b/validator/manager.cpp index 54d272bc..4bac4a74 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -784,6 +784,8 @@ void ValidatorManagerImpl::wait_block_message_queue_short(BlockIdExt block_id, t void ValidatorManagerImpl::get_external_messages(ShardIdFull shard, td::Promise>> promise) { + td::Timer t; + size_t processed = 0, deleted = 0; std::vector> res; MessageId left{AccountIdPrefixFull{shard.workchain, shard.shard & (shard.shard - 1)}, Bits256::zero()}; auto it = ext_messages_.lower_bound(left); @@ -792,10 +794,12 @@ void ValidatorManagerImpl::get_external_messages(ShardIdFull shard, if (!shard_contains(shard, s.dst)) { break; } + ++processed; if (it->second->expired()) { ext_addr_messages_[it->second->address()].erase(it->first.hash); ext_messages_hashes_.erase(it->first.hash); it = ext_messages_.erase(it); + ++deleted; continue; } if (it->second->is_active()) { @@ -803,6 +807,9 @@ void ValidatorManagerImpl::get_external_messages(ShardIdFull shard, } it++; } + LOG(WARNING) << "get_external_messages to shard " << shard.to_str() << " : time=" << t.elapsed() + << " result_size=" << res.size() << " processed=" << processed << " expired=" << deleted + << " total_size=" << ext_messages_.size(); promise.set_value(std::move(res)); } @@ -1356,7 +1363,18 @@ td::Ref ValidatorManagerImpl::do_get_last_liteserver_state() { if (last_masterchain_state_.is_null()) { return {}; } - if (last_liteserver_state_.is_null() || last_liteserver_state_->get_unix_time() < td::Clocks::system() - 30) { + if (last_liteserver_state_.is_null()) { + last_liteserver_state_ = last_masterchain_state_; + return last_liteserver_state_; + } + if (last_liteserver_state_->get_seqno() == last_masterchain_state_->get_seqno()) { + return last_liteserver_state_; + } + // If liteserver seqno (i.e. 
shard client) lags then use last masterchain state for liteserver + // Allowed lag depends on the block rate + double time_per_block = double(last_masterchain_state_->get_unix_time() - last_liteserver_state_->get_unix_time()) / + double(last_masterchain_state_->get_seqno() - last_liteserver_state_->get_seqno()); + if (td::Clocks::system() - double(last_liteserver_state_->get_unix_time()) > std::min(time_per_block * 8, 180.0)) { last_liteserver_state_ = last_masterchain_state_; } return last_liteserver_state_; @@ -2356,15 +2374,15 @@ void ValidatorManagerImpl::alarm() { } if (log_status_at_.is_in_past()) { if (last_masterchain_block_handle_) { - LOG(INFO) << "STATUS: last_masterchain_block_ago=" - << td::format::as_time(td::Clocks::system() - last_masterchain_block_handle_->unix_time()) - << " last_known_key_block_ago=" - << td::format::as_time(td::Clocks::system() - (last_known_key_block_handle_->inited_unix_time() - ? last_known_key_block_handle_->unix_time() - : 0)) - << " shard_client_ago=" - << td::format::as_time(td::Clocks::system() - - (shard_client_handle_ ? shard_client_handle_->unix_time() : 0)); + LOG(ERROR) << "STATUS: last_masterchain_block_ago=" + << td::format::as_time(td::Clocks::system() - last_masterchain_block_handle_->unix_time()) + << " last_known_key_block_ago=" + << td::format::as_time(td::Clocks::system() - (last_known_key_block_handle_->inited_unix_time() + ? last_known_key_block_handle_->unix_time() + : 0)) + << " shard_client_ago=" + << td::format::as_time(td::Clocks::system() - + (shard_client_handle_ ? shard_client_handle_->unix_time() : 0)); } log_status_at_ = td::Timestamp::in(60.0); } diff --git a/validator/validator-group.cpp b/validator/validator-group.cpp index c1f4f38a..7baf0f05 100644 --- a/validator/validator-group.cpp +++ b/validator/validator-group.cpp @@ -136,7 +136,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s std::vector approve_signatures, validatorsession::ValidatorSessionStats stats, td::Promise promise) { - if (round_id >= last_known_round_id_) { + if (round_id >= last_known_round_id_) { last_known_round_id_ = round_id + 1; } auto sig_set = create_signature_set(std::move(signatures)); @@ -150,6 +150,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s return; } auto next_block_id = create_next_block_id(root_hash, file_hash); + LOG(WARNING) << "Accepted block " << next_block_id; td::actor::send_closure(manager_, &ValidatorManager::log_validator_session_stats, next_block_id, std::move(stats)); auto block = block_data.size() > 0 ? 
create_block(next_block_id, std::move(block_data)).move_as_ok() : td::Ref{};

From 83efcebad0adcc63a330eb9716c5e8d884679143 Mon Sep 17 00:00:00 2001
From: EmelyanenkoK
Date: Tue, 19 Dec 2023 10:39:35 +0300
Subject: [PATCH 42/71] Improve CellDb migration (#835)

* Fix deserializing cells
* Use proxy actor
* Add delays
* Print stats every minute

Co-authored-by: SpyCheese
---
 crypto/vm/boc.cpp                     |  4 +-
 crypto/vm/boc.h                       |  2 +-
 crypto/vm/db/CellStorage.cpp          |  2 +-
 validator-engine/validator-engine.cpp | 14 +++---
 validator/db/celldb.cpp               | 61 ++++++++++++++++++++-------
 validator/db/celldb.hpp               | 24 +++++++++++
 6 files changed, 82 insertions(+), 25 deletions(-)

diff --git a/crypto/vm/boc.cpp b/crypto/vm/boc.cpp
index 11583ede..bd334cbf 100644
--- a/crypto/vm/boc.cpp
+++ b/crypto/vm/boc.cpp
@@ -930,7 +930,7 @@ unsigned long long BagOfCells::get_idx_entry_raw(int index) {
 
  *
  */
-td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty) {
+td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty, bool allow_nonzero_level) {
   if (data.empty() && can_be_empty) {
     return Ref();
   }
@@ -946,7 +946,7 @@ td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty) {
   if (root.is_null()) {
     return td::Status::Error("bag of cells has null root cell (?)");
   }
-  if (root->get_level() != 0) {
+  if (!allow_nonzero_level && root->get_level() != 0) {
     return td::Status::Error("bag of cells has a root with non-zero level");
   }
   return std::move(root);
diff --git a/crypto/vm/boc.h b/crypto/vm/boc.h
index c7a1810d..1bff5b25 100644
--- a/crypto/vm/boc.h
+++ b/crypto/vm/boc.h
@@ -323,7 +323,7 @@ class BagOfCells {
                               std::vector* cell_should_cache);
 };
 
-td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty = false);
+td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty = false, bool allow_nonzero_level = false);
 td::Result std_boc_serialize(Ref root, int mode = 0);
 
 td::Result>> std_boc_deserialize_multi(td::Slice data,
diff --git a/crypto/vm/db/CellStorage.cpp b/crypto/vm/db/CellStorage.cpp
index acc55898..303d4650 100644
--- a/crypto/vm/db/CellStorage.cpp
+++ b/crypto/vm/db/CellStorage.cpp
@@ -98,7 +98,7 @@ class RefcntCellParser {
     auto size = parser.get_left_len();
     td::Slice data = parser.template fetch_string_raw(size);
     if (stored_boc_) {
-      TRY_RESULT(boc, vm::std_boc_deserialize(data));
+      TRY_RESULT(boc, vm::std_boc_deserialize(data, false, true));
       TRY_RESULT(loaded_cell, boc->load_cell());
       cell = std::move(loaded_cell.data_cell);
       return td::Status::OK();
diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp
index d98c296c..1d9223f2 100644
--- a/validator-engine/validator-engine.cpp
+++ b/validator-engine/validator-engine.cpp
@@ -3775,11 +3775,15 @@ int main(int argc, char *argv[]) {
     acts.push_back([&x, at]() { td::actor::send_closure(x, &ValidatorEngine::schedule_shutdown, (double)at); });
     return td::Status::OK();
   });
-  p.add_checked_option('\0', "celldb-compress-depth", "(default: 0)", [&](td::Slice arg) {
-    TRY_RESULT(value, td::to_integer_safe(arg));
-    acts.push_back([&x, value]() { td::actor::send_closure(x, &ValidatorEngine::set_celldb_compress_depth, value); });
-    return td::Status::OK();
-  });
+  p.add_checked_option('\0', "celldb-compress-depth",
+                       "optimize celldb by storing cells of depth X with whole subtrees (experimental, default: 0)",
+                       [&](td::Slice arg) {
+                         TRY_RESULT(value, td::to_integer_safe(arg));
+                         acts.push_back([&x, value]() {
+                           td::actor::send_closure(x, &ValidatorEngine::set_celldb_compress_depth, value);
+                         });
+                         return
td::Status::OK(); + }); auto S = p.run(argc, argv); if (S.is_error()) { LOG(ERROR) << "failed to parse options: " << S.move_as_error(); diff --git a/validator/db/celldb.cpp b/validator/db/celldb.cpp index 6a2b4699..d29126ce 100644 --- a/validator/db/celldb.cpp +++ b/validator/db/celldb.cpp @@ -23,6 +23,7 @@ #include "ton/ton-tl.hpp" #include "ton/ton-io.hpp" +#include "common/delay.h" namespace ton { @@ -68,14 +69,16 @@ CellDbIn::CellDbIn(td::actor::ActorId root_db, td::actor::ActorId>( + td::actor::create_actor("celldbmigration", actor_id(this))), compress_depth = opts_->get_celldb_compress_depth()](const vm::CellLoader::LoadResult& res) { if (res.cell_.is_null()) { return; } bool expected_stored_boc = res.cell_->get_depth() == compress_depth && compress_depth != 0; if (expected_stored_boc != res.stored_boc_) { - td::actor::send_closure(db, &CellDbIn::migrate_cell, td::Bits256{res.cell_->get_hash().bits()}); + td::actor::send_closure(*actor, &CellDbIn::MigrationProxy::migrate_cell, + td::Bits256{res.cell_->get_hash().bits()}); } }; @@ -156,6 +159,13 @@ void CellDbIn::alarm() { if (migrate_after_ && migrate_after_.is_in_past()) { migrate_cells(); } + if (migration_stats_ && migration_stats_->end_at_.is_in_past()) { + LOG(INFO) << "CellDb migration, " << migration_stats_->start_.elapsed() + << "s stats: batches=" << migration_stats_->batches_ << " migrated=" << migration_stats_->migrated_cells_ + << " checked=" << migration_stats_->checked_cells_ << " time=" << migration_stats_->total_time_ + << " queue_size=" << cells_to_migrate_.size(); + migration_stats_ = {}; + } auto E = get_block(get_empty_key_hash()).move_as_ok(); auto N = get_block(E.next).move_as_ok(); if (N.is_empty()) { @@ -291,23 +301,31 @@ void CellDbIn::set_block(KeyHash key_hash, DbEntry e) { void CellDbIn::migrate_cell(td::Bits256 hash) { cells_to_migrate_.insert(hash); - if (cells_to_migrate_.size() >= 32) { - migrate_cells(); - } else if (!migrate_after_) { - migrate_after_ = td::Timestamp::in(1.0); + if (!migration_active_) { + migration_active_ = true; + migrate_after_ = td::Timestamp::in(10.0); } } void CellDbIn::migrate_cells() { + migrate_after_ = td::Timestamp::never(); if (cells_to_migrate_.empty()) { + migration_active_ = false; return; } + td::Timer timer; + if (!migration_stats_) { + migration_stats_ = std::make_unique(); + } vm::CellStorer stor{*cell_db_}; auto loader = std::make_unique(cell_db_->snapshot()); boc_->set_loader(std::make_unique(*loader)).ensure(); cell_db_->begin_write_batch().ensure(); - td::uint32 cnt = 0; - for (const auto& hash : cells_to_migrate_) { + td::uint32 checked = 0, migrated = 0; + for (auto it = cells_to_migrate_.begin(); it != cells_to_migrate_.end() && checked < 128; ) { + ++checked; + td::Bits256 hash = *it; + it = cells_to_migrate_.erase(it); auto R = loader->load(hash.as_slice(), true, boc_->as_ext_cell_creator()); if (R.is_error()) { continue; @@ -318,18 +336,27 @@ void CellDbIn::migrate_cells() { bool expected_stored_boc = R.ok().cell_->get_depth() == opts_->get_celldb_compress_depth() && opts_->get_celldb_compress_depth() != 0; if (expected_stored_boc != R.ok().stored_boc_) { - ++cnt; + ++migrated; stor.set(R.ok().refcnt(), R.ok().cell_, expected_stored_boc).ensure(); } } - cells_to_migrate_.clear(); - if (cnt > 0) { - LOG(DEBUG) << "Migrated " << cnt << " cells"; - } cell_db_->commit_write_batch().ensure(); boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); - 
migrate_after_ = td::Timestamp::never(); + + double time = timer.elapsed(); + LOG(DEBUG) << "CellDb migration: migrated=" << migrated << " checked=" << checked << " time=" << time; + ++migration_stats_->batches_; + migration_stats_->migrated_cells_ += migrated; + migration_stats_->checked_cells_ += checked; + migration_stats_->total_time_ += time; + + if (cells_to_migrate_.empty()) { + migration_active_ = false; + } else { + delay_action([SelfId = actor_id(this)] { td::actor::send_closure(SelfId, &CellDbIn::migrate_cells); }, + td::Timestamp::in(time * 2)); + } } void CellDb::load_cell(RootHash hash, td::Promise> promise) { @@ -361,14 +388,16 @@ void CellDb::start_up() { boc_ = vm::DynamicBagOfCellsDb::create(); boc_->set_celldb_compress_depth(opts_->get_celldb_compress_depth()); cell_db_ = td::actor::create_actor("celldbin", root_db_, actor_id(this), path_, opts_); - on_load_callback_ = [db = cell_db_.get(), + on_load_callback_ = [actor = std::make_shared>( + td::actor::create_actor("celldbmigration", cell_db_.get())), compress_depth = opts_->get_celldb_compress_depth()](const vm::CellLoader::LoadResult& res) { if (res.cell_.is_null()) { return; } bool expected_stored_boc = res.cell_->get_depth() == compress_depth && compress_depth != 0; if (expected_stored_boc != res.stored_boc_) { - td::actor::send_closure(db, &CellDbIn::migrate_cell, td::Bits256{res.cell_->get_hash().bits()}); + td::actor::send_closure(*actor, &CellDbIn::MigrationProxy::migrate_cell, + td::Bits256{res.cell_->get_hash().bits()}); } }; } diff --git a/validator/db/celldb.hpp b/validator/db/celldb.hpp index 6545d597..a2a84ab4 100644 --- a/validator/db/celldb.hpp +++ b/validator/db/celldb.hpp @@ -107,6 +107,30 @@ class CellDbIn : public CellDbBase { std::function on_load_callback_; std::set cells_to_migrate_; td::Timestamp migrate_after_ = td::Timestamp::never(); + bool migration_active_ = false; + + struct MigrationStats { + td::Timer start_; + td::Timestamp end_at_ = td::Timestamp::in(60.0); + size_t batches_ = 0; + size_t migrated_cells_ = 0; + size_t checked_cells_ = 0; + double total_time_ = 0.0; + }; + std::unique_ptr migration_stats_; + + public: + class MigrationProxy : public td::actor::Actor { + public: + explicit MigrationProxy(td::actor::ActorId cell_db) : cell_db_(cell_db) { + } + void migrate_cell(td::Bits256 hash) { + td::actor::send_closure(cell_db_, &CellDbIn::migrate_cell, hash); + } + + private: + td::actor::ActorId cell_db_; + }; }; class CellDb : public CellDbBase { From b3be4283ff80e8b7290a6a35b503672c8823e839 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 25 Dec 2023 09:30:51 +0300 Subject: [PATCH 43/71] Better error messages from LS for missing blocks (#837) Co-authored-by: SpyCheese --- validator/impl/liteserver.cpp | 37 ++---- validator/interfaces/validator-manager.h | 8 ++ validator/manager-disk.hpp | 15 +++ validator/manager-hardfork.hpp | 15 +++ validator/manager.cpp | 144 ++++++++++++++++++++++- validator/manager.hpp | 14 +++ 6 files changed, 202 insertions(+), 31 deletions(-) diff --git a/validator/impl/liteserver.cpp b/validator/impl/liteserver.cpp index 9c7a0456..86fc690a 100644 --- a/validator/impl/liteserver.cpp +++ b/validator/impl/liteserver.cpp @@ -505,20 +505,7 @@ void LiteQuery::perform_sendMessage(td::BufferSlice data) { } void LiteQuery::get_block_handle_checked(BlockIdExt blkid, td::Promise promise) { - auto P = td::PromiseCreator::lambda( - [promise = std::move(promise)](td::Result R) mutable { - if (R.is_error()) { - promise.set_error(R.move_as_error()); - } else { - auto 
handle = R.move_as_ok(); - if (handle->is_applied()) { - promise.set_result(std::move(handle)); - } else { - promise.set_error(td::Status::Error(ErrorCode::notready, "block is not applied")); - } - } - }); - td::actor::send_closure(manager_, &ValidatorManager::get_block_handle, blkid, false, std::move(P)); + td::actor::send_closure(manager_, &ValidatorManager::get_block_handle_for_litequery, blkid, std::move(promise)); } bool LiteQuery::request_mc_block_data(BlockIdExt blkid) { @@ -1497,17 +1484,12 @@ void LiteQuery::continue_getTransactions(unsigned remaining, bool exact) { LOG(DEBUG) << "sending get_block_by_lt_from_db() query to manager for " << acc_workchain_ << ":" << acc_addr_.to_hex() << " " << trans_lt_; td::actor::send_closure_later( - manager_, &ValidatorManager::get_block_by_lt_from_db, ton::extract_addr_prefix(acc_workchain_, acc_addr_), + manager_, &ValidatorManager::get_block_by_lt_from_db_for_litequery, ton::extract_addr_prefix(acc_workchain_, acc_addr_), trans_lt_, [Self = actor_id(this), remaining, manager = manager_](td::Result res) { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_getTransactions, res.move_as_error(), ton::BlockIdExt{}); } else { auto handle = res.move_as_ok(); - if (!handle->is_applied()) { - td::actor::send_closure(Self, &LiteQuery::abort_getTransactions, td::Status::Error(ErrorCode::notready, "block is not applied"), - ton::BlockIdExt{}); - return; - } LOG(DEBUG) << "requesting data for block " << handle->id().to_str(); td::actor::send_closure_later(manager, &ValidatorManager::get_block_data_from_db, handle, [Self, blkid = handle->id(), remaining](td::Result> res) { @@ -1846,10 +1828,6 @@ void LiteQuery::perform_lookupBlock(BlockId blkid, int mode, LogicalTime lt, Uni td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); } else { auto handle = res.move_as_ok(); - if (!handle->is_applied()) { - td::actor::send_closure(Self, &LiteQuery::abort_query, td::Status::Error(ErrorCode::notready, "block is not applied")); - return; - } LOG(DEBUG) << "requesting data for block " << handle->id().to_str(); td::actor::send_closure_later(manager, &ValidatorManager::get_block_data_from_db, handle, [Self, blkid = handle->id(), mode](td::Result> res) { @@ -1865,13 +1843,14 @@ void LiteQuery::perform_lookupBlock(BlockId blkid, int mode, LogicalTime lt, Uni ton::AccountIdPrefixFull pfx{blkid.workchain, blkid.shard}; if (mode & 2) { - td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_lt_from_db, pfx, lt, std::move(P)); + td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_lt_from_db_for_litequery, pfx, lt, + std::move(P)); } else if (mode & 4) { - td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_unix_time_from_db, pfx, utime, + td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_unix_time_from_db_for_litequery, pfx, utime, std::move(P)); } else { - td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_seqno_from_db, pfx, blkid.seqno, - std::move(P)); + td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_seqno_from_db_for_litequery, pfx, + blkid.seqno, std::move(P)); } } @@ -2629,7 +2608,7 @@ void LiteQuery::perform_getShardBlockProof(BlockIdExt blkid) { } AccountIdPrefixFull pfx{masterchainId, shardIdAll}; td::actor::send_closure_later( - manager, &ValidatorManager::get_block_by_seqno_from_db, pfx, handle->masterchain_ref_block(), + manager, &ValidatorManager::get_block_by_seqno_from_db_for_litequery, pfx, 
handle->masterchain_ref_block(), [Self, manager](td::Result R) { if (R.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, R.move_as_error()); diff --git a/validator/interfaces/validator-manager.h b/validator/interfaces/validator-manager.h index ae15ed4c..6b375bac 100644 --- a/validator/interfaces/validator-manager.h +++ b/validator/interfaces/validator-manager.h @@ -170,6 +170,14 @@ class ValidatorManager : public ValidatorManagerInterface { virtual void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) = 0; + virtual void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) = 0; + virtual void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) = 0; + virtual void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) = 0; + virtual void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) = 0; + static bool is_persistent_state(UnixTime ts, UnixTime prev_ts) { return ts / (1 << 17) != prev_ts / (1 << 17); } diff --git a/validator/manager-disk.hpp b/validator/manager-disk.hpp index 1c3f4f5b..4812d60a 100644 --- a/validator/manager-disk.hpp +++ b/validator/manager-disk.hpp @@ -384,6 +384,21 @@ class ValidatorManagerImpl : public ValidatorManager { } td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); } + void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) override { + get_block_handle(block_id, false, promise.wrap([](BlockHandle &&handle) -> ConstBlockHandle { return handle; })); + } + void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) override { + get_block_by_lt_from_db(account, lt, std::move(promise)); + } + void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) override { + get_block_by_unix_time_from_db(account, ts, std::move(promise)); + } + void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) override { + get_block_by_seqno_from_db(account, seqno, std::move(promise)); + } private: PublicKeyHash local_id_; diff --git a/validator/manager-hardfork.hpp b/validator/manager-hardfork.hpp index 675c2304..c34ae5c7 100644 --- a/validator/manager-hardfork.hpp +++ b/validator/manager-hardfork.hpp @@ -445,6 +445,21 @@ class ValidatorManagerImpl : public ValidatorManager { } td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); } + void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) override { + get_block_handle(block_id, false, promise.wrap([](BlockHandle &&handle) -> ConstBlockHandle { return handle; })); + } + void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) override { + get_block_by_lt_from_db(account, lt, std::move(promise)); + } + void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) override { + get_block_by_unix_time_from_db(account, ts, std::move(promise)); + } + void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) override { + get_block_by_seqno_from_db(account, seqno, std::move(promise)); + } private: td::Ref 
opts_; diff --git a/validator/manager.cpp b/validator/manager.cpp index 4bac4a74..dcda7b6e 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -2334,8 +2334,11 @@ void ValidatorManagerImpl::update_shard_client_block_handle(BlockHandle handle, td::Promise promise) { shard_client_handle_ = std::move(handle); auto seqno = shard_client_handle_->id().seqno(); - if (last_liteserver_state_.is_null() || last_liteserver_state_->get_block_id().seqno() < seqno) { - last_liteserver_state_ = std::move(state); + if (state.not_null()) { + shard_client_shards_ = state->get_shards(); + if (last_liteserver_state_.is_null() || last_liteserver_state_->get_block_id().seqno() < seqno) { + last_liteserver_state_ = std::move(state); + } } shard_client_update(seqno); promise.set_value(td::Unit()); @@ -2662,6 +2665,143 @@ void ValidatorManagerImpl::log_validator_session_stats(BlockIdExt block_id, LOG(INFO) << "Writing validator session stats for " << block_id.id; } +void ValidatorManagerImpl::get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) { + get_block_handle( + block_id, false, + [SelfId = actor_id(this), block_id, promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_block_handle_for_litequery_error, block_id, + std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) { + get_block_by_lt_from_db( + account, lt, [=, SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_lookup_block_for_litequery_error, account, 0, + lt, std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) { + get_block_by_unix_time_from_db( + account, ts, [=, SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_lookup_block_for_litequery_error, account, 1, + ts, std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) { + get_block_by_seqno_from_db( + account, seqno, + [=, SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_lookup_block_for_litequery_error, account, 2, + seqno, std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::process_block_handle_for_litequery_error(BlockIdExt block_id, + td::Result r_handle, + td::Promise promise) { + td::Status err; + if (r_handle.is_error()) { + err = r_handle.move_as_error(); + } else { + auto handle = r_handle.move_as_ok(); + if (handle->is_applied()) { + promise.set_value(std::move(handle)); + return; + } + if (!handle->received() || !handle->received_state()) { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << block_id.id.to_str() << " is not 
in db"); + } else { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << block_id.id.to_str() << " is not applied"); + } + } + if (block_id.is_masterchain()) { + if (block_id.seqno() > last_masterchain_seqno_) { + err = err.move_as_error_suffix(PSTRING() << " (last known masterchain block: " << last_masterchain_seqno_ << ")"); + } + } else { + for (auto &shard : shard_client_shards_) { + if (shard_intersects(shard->shard(), block_id.shard_full())) { + if (block_id.seqno() > shard->top_block_id().seqno()) { + err = err.move_as_error_suffix( + PSTRING() << " (possibly out of sync: shard_client_seqno=" + << (shard_client_handle_ ? shard_client_handle_->id().seqno() : 0) << " ls_seqno=" + << (last_liteserver_state_.not_null() ? last_liteserver_state_->get_seqno() : 0) << ")"); + } + break; + } + } + } + promise.set_error(std::move(err)); +} + +void ValidatorManagerImpl::process_lookup_block_for_litequery_error(AccountIdPrefixFull account, int type, + td::uint64 value, + td::Result r_handle, + td::Promise promise) { + td::Status err; + if (r_handle.is_error()) { + err = r_handle.move_as_error(); + } else { + auto handle = r_handle.move_as_ok(); + if (handle->is_applied()) { + promise.set_value(std::move(handle)); + return; + } + if (!handle->received() || !handle->received_state()) { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << handle->id().to_str() << " is not in db"); + } else { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << handle->id().to_str() << " is not applied"); + } + } + if (account.is_masterchain()) { + if (value > (type == 0 + ? last_masterchain_state_->get_logical_time() + : (type == 1 ? last_masterchain_state_->get_unix_time() : last_masterchain_state_->get_seqno()))) { + err = err.move_as_error_suffix(PSTRING() << " (last known masterchain block: " << last_masterchain_seqno_ << ")"); + } + } else { + for (auto &shard : shard_client_shards_) { + if (shard_intersects(shard->shard(), account.as_leaf_shard())) { + if (value > (type == 0 ? shard->end_lt() + : (type == 1 ? (shard_client_handle_ ? shard_client_handle_->unix_time() : 0) + : shard->top_block_id().seqno()))) { + err = err.move_as_error_suffix( + PSTRING() << " (possibly out of sync: shard_client_seqno=" + << (shard_client_handle_ ? shard_client_handle_->id().seqno() : 0) << " ls_seqno=" + << (last_liteserver_state_.not_null() ? 
last_liteserver_state_->get_seqno() : 0) << ")"); + } + break; + } + } + } + static std::string names[3] = {"lt", "utime", "seqno"}; + err = err.move_as_error_prefix(PSTRING() << "cannot find block " << account.to_str() << " " << names[type] << "=" + << value << ": "); + promise.set_error(std::move(err)); +} + td::actor::ActorOwn ValidatorManagerFactory::create( td::Ref opts, std::string db_root, td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId rldp, diff --git a/validator/manager.hpp b/validator/manager.hpp index 9f51cc27..bdf0155e 100644 --- a/validator/manager.hpp +++ b/validator/manager.hpp @@ -252,6 +252,7 @@ class ValidatorManagerImpl : public ValidatorManager { BlockHandle last_key_block_handle_; BlockHandle last_known_key_block_handle_; BlockHandle shard_client_handle_; + std::vector> shard_client_shards_; td::Ref last_liteserver_state_; td::Ref do_get_last_liteserver_state(); @@ -561,6 +562,19 @@ class ValidatorManagerImpl : public ValidatorManager { td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); } + void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) override; + void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) override; + void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) override; + void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) override; + void process_block_handle_for_litequery_error(BlockIdExt block_id, td::Result r_handle, + td::Promise promise); + void process_lookup_block_for_litequery_error(AccountIdPrefixFull account, int type, td::uint64 value, + td::Result r_handle, + td::Promise promise); + private: td::Timestamp resend_shard_blocks_at_; td::Timestamp check_waiters_at_; From c8918f0c02a800d32bee8f5f633575e7fde7a8af Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 27 Dec 2023 15:50:09 +0300 Subject: [PATCH 44/71] Write config.json using temp file (#839) Co-authored-by: SpyCheese --- dht-server/dht-server.cpp | 17 +++++++++++++---- dht-server/dht-server.hpp | 3 +++ validator-engine/validator-engine.cpp | 17 +++++++++++++---- validator-engine/validator-engine.hpp | 5 ++++- 4 files changed, 33 insertions(+), 9 deletions(-) diff --git a/dht-server/dht-server.cpp b/dht-server/dht-server.cpp index f729105f..37a158eb 100644 --- a/dht-server/dht-server.cpp +++ b/dht-server/dht-server.cpp @@ -572,6 +572,12 @@ void DhtServer::load_config(td::Promise promise) { config_file_ = db_root_ + "/config.json"; } auto conf_data_R = td::read_file(config_file_); + if (conf_data_R.is_error()) { + conf_data_R = td::read_file(temp_config_file()); + if (conf_data_R.is_ok()) { + td::rename(temp_config_file(), config_file_).ensure(); + } + } if (conf_data_R.is_error()) { auto P = td::PromiseCreator::lambda( [name = local_config_, new_name = config_file_, promise = std::move(promise)](td::Result R) { @@ -620,12 +626,15 @@ void DhtServer::load_config(td::Promise promise) { void DhtServer::write_config(td::Promise promise) { auto s = td::json_encode(td::ToJson(*config_.tl().get()), true); - auto S = td::write_file(config_file_, s); - if (S.is_ok()) { - promise.set_value(td::Unit()); - } else { + auto S = td::write_file(temp_config_file(), s); + if (S.is_error()) { + td::unlink(temp_config_file()).ignore(); promise.set_error(std::move(S)); + return; } + td::unlink(config_file_).ignore(); + 
TRY_STATUS_PROMISE(promise, td::rename(temp_config_file(), config_file_)); + promise.set_value(td::Unit()); } td::Promise DhtServer::get_key_promise(td::MultiPromise::InitGuard &ig) { diff --git a/dht-server/dht-server.hpp b/dht-server/dht-server.hpp index bf24d621..5b81875b 100644 --- a/dht-server/dht-server.hpp +++ b/dht-server/dht-server.hpp @@ -109,6 +109,9 @@ class DhtServer : public td::actor::Actor { std::string local_config_ = ""; std::string global_config_ = "ton-global.config"; std::string config_file_; + std::string temp_config_file() const { + return config_file_ + ".tmp"; + } std::string db_root_ = "/var/ton-work/db/"; diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp index 1d9223f2..e488504f 100644 --- a/validator-engine/validator-engine.cpp +++ b/validator-engine/validator-engine.cpp @@ -1595,6 +1595,12 @@ void ValidatorEngine::load_config(td::Promise promise) { config_file_ = db_root_ + "/config.json"; } auto conf_data_R = td::read_file(config_file_); + if (conf_data_R.is_error()) { + conf_data_R = td::read_file(temp_config_file()); + if (conf_data_R.is_ok()) { + td::rename(temp_config_file(), config_file_).ensure(); + } + } if (conf_data_R.is_error()) { auto P = td::PromiseCreator::lambda( [name = local_config_, new_name = config_file_, promise = std::move(promise)](td::Result R) { @@ -1643,12 +1649,15 @@ void ValidatorEngine::load_config(td::Promise promise) { void ValidatorEngine::write_config(td::Promise promise) { auto s = td::json_encode(td::ToJson(*config_.tl().get()), true); - auto S = td::write_file(config_file_, s); - if (S.is_ok()) { - promise.set_value(td::Unit()); - } else { + auto S = td::write_file(temp_config_file(), s); + if (S.is_error()) { + td::unlink(temp_config_file()).ignore(); promise.set_error(std::move(S)); + return; } + td::unlink(config_file_).ignore(); + TRY_STATUS_PROMISE(promise, td::rename(temp_config_file(), config_file_)); + promise.set_value(td::Unit()); } td::Promise ValidatorEngine::get_key_promise(td::MultiPromise::InitGuard &ig) { diff --git a/validator-engine/validator-engine.hpp b/validator-engine/validator-engine.hpp index ebcd60c6..e59bb418 100644 --- a/validator-engine/validator-engine.hpp +++ b/validator-engine/validator-engine.hpp @@ -1,4 +1,4 @@ -/* +/* This file is part of TON Blockchain source code. TON Blockchain is free software; you can redistribute it and/or @@ -152,6 +152,9 @@ class ValidatorEngine : public td::actor::Actor { std::string local_config_ = ""; std::string global_config_ = "ton-global.config"; std::string config_file_; + std::string temp_config_file() const { + return config_file_ + ".tmp"; + } std::string fift_dir_ = ""; From 550c28d7dbc23a52ca8c6e6d9e4a2de14822db90 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Thu, 28 Dec 2023 09:43:10 +0300 Subject: [PATCH 45/71] Improve DHT store/load, pinging overlay peers (#840) * Improve DHT store/load, pinging overlay peers * Fix speed limits in storage * Use keyStoreTypeDirectory in rldp-http-proxy and storage-daemon Mainly for caching synced block in tonlib. 
--------- Co-authored-by: SpyCheese --- dht/dht-in.hpp | 5 +- dht/dht-query.cpp | 87 +++++++++++++---------- dht/dht-query.hpp | 16 +++-- dht/dht.cpp | 22 +++--- dht/dht.hpp | 2 +- overlay/overlay-manager.cpp | 6 +- overlay/overlay-manager.h | 2 +- overlay/overlay.cpp | 23 ++++-- overlay/overlay.h | 2 +- overlay/overlay.hpp | 24 ++++++- overlay/overlays.h | 7 +- rldp-http-proxy/rldp-http-proxy.cpp | 16 ++++- storage/PeerActor.cpp | 24 +++++-- storage/PeerManager.h | 6 +- storage/SpeedLimiter.cpp | 5 +- storage/storage-daemon/storage-daemon.cpp | 4 +- 16 files changed, 162 insertions(+), 89 deletions(-) diff --git a/dht/dht-in.hpp b/dht/dht-in.hpp index 59ce2184..c2d20455 100644 --- a/dht/dht-in.hpp +++ b/dht/dht-in.hpp @@ -155,10 +155,7 @@ class DhtMemberImpl : public DhtMember { } } - void add_full_node(DhtKeyId id, DhtNode node) override { - add_full_node_impl(id, std::move(node)); - } - void add_full_node_impl(DhtKeyId id, DhtNode node, bool set_active = false); + void add_full_node(DhtKeyId id, DhtNode node, bool set_active) override; adnl::AdnlNodeIdShort get_id() const override { return id_; diff --git a/dht/dht-query.cpp b/dht/dht-query.cpp index bc61242d..b84ef8c3 100644 --- a/dht/dht-query.cpp +++ b/dht/dht-query.cpp @@ -34,24 +34,33 @@ namespace ton { namespace dht { void DhtQuery::send_queries() { + while (pending_queries_.size() > k_ * 2) { + pending_queries_.erase(--pending_queries_.end()); + } VLOG(DHT_EXTRA_DEBUG) << this << ": sending new queries. active=" << active_queries_ << " max_active=" << a_; - while (pending_ids_.size() > 0 && active_queries_ < a_) { + while (pending_queries_.size() > 0 && active_queries_ < a_) { + auto id_xor = *pending_queries_.begin(); + if (result_list_.size() == k_ && *result_list_.rbegin() < id_xor) { + break; + } active_queries_++; - auto id_xor = *pending_ids_.begin(); auto id = id_xor ^ key_; VLOG(DHT_EXTRA_DEBUG) << this << ": sending " << get_name() << " query to " << id; - pending_ids_.erase(id_xor); + pending_queries_.erase(id_xor); - auto it = list_.find(id_xor); - CHECK(it != list_.end()); - td::actor::send_closure(adnl_, &adnl::Adnl::add_peer, get_src(), it->second.adnl_id(), it->second.addr_list()); + auto it = nodes_.find(id_xor); + CHECK(it != nodes_.end()); + td::actor::send_closure(adnl_, &adnl::Adnl::add_peer, get_src(), it->second.node.adnl_id(), + it->second.node.addr_list()); send_one_query(id.to_adnl()); } if (active_queries_ == 0) { - CHECK(pending_ids_.size() == 0); + pending_queries_.clear(); DhtNodesList list; - for (auto &node : list_) { - list.push_back(std::move(node.second)); + for (auto id_xor : result_list_) { + auto it = nodes_.find(id_xor); + CHECK(it != nodes_.end()); + list.push_back(it->second.node.clone()); } CHECK(list.size() <= k_); VLOG(DHT_EXTRA_DEBUG) << this << ": finalizing " << get_name() << " query. 
List size=" << list.size(); @@ -65,30 +74,32 @@ void DhtQuery::add_nodes(DhtNodesList list) { for (auto &node : list.list()) { auto id = node.get_key(); auto id_xor = key_ ^ id; - if (list_.find(id_xor) != list_.end()) { + if (nodes_.find(id_xor) != nodes_.end()) { continue; } - td::actor::send_closure(node_, &DhtMember::add_full_node, id, node.clone()); + VLOG(DHT_EXTRA_DEBUG) << this << ": " << get_name() << " query: adding " << id << " key"; + td::actor::send_closure(node_, &DhtMember::add_full_node, id, node.clone(), false); + nodes_[id_xor].node = std::move(node); + pending_queries_.insert(id_xor); + } +} - DhtKeyId last_id_xor; - if (list_.size() > 0) { - last_id_xor = list_.rbegin()->first; +void DhtQuery::finish_query(adnl::AdnlNodeIdShort id, bool success) { + active_queries_--; + CHECK(active_queries_ <= k_); + auto id_xor = key_ ^ DhtKeyId(id); + if (success) { + result_list_.insert(id_xor); + if (result_list_.size() > k_) { + result_list_.erase(--result_list_.end()); } - - if (list_.size() < k_ || id_xor < last_id_xor) { - list_[id_xor] = std::move(node); - pending_ids_.insert(id_xor); - if (list_.size() > k_) { - CHECK(id_xor != last_id_xor); - VLOG(DHT_EXTRA_DEBUG) << this << ": " << get_name() << " query: replacing " << (last_id_xor ^ key_) - << " key with " << id; - pending_ids_.erase(last_id_xor); - list_.erase(last_id_xor); - } else { - VLOG(DHT_EXTRA_DEBUG) << this << ": " << get_name() << " query: adding " << id << " key"; - } + } else { + NodeInfo &info = nodes_[id_xor]; + if (++info.failed_attempts < MAX_ATTEMPTS) { + pending_queries_.insert(id_xor); } } + send_queries(); } void DhtQueryFindNodes::send_one_query(adnl::AdnlNodeIdShort id) { @@ -111,7 +122,7 @@ void DhtQueryFindNodes::send_one_query(adnl::AdnlNodeIdShort id) { void DhtQueryFindNodes::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find nodes query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } @@ -122,7 +133,7 @@ void DhtQueryFindNodes::on_result(td::Result R, adnl::AdnlNodeI } else { add_nodes(DhtNodesList{Res.move_as_ok(), our_network_id()}); } - finish_query(); + finish_query(dst); } void DhtQueryFindNodes::finish(DhtNodesList list) { @@ -166,14 +177,14 @@ void DhtQueryFindValue::send_one_query_nodes(adnl::AdnlNodeIdShort id) { void DhtQueryFindValue::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find value query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto Res = fetch_tl_object(R.move_as_ok(), true); if (Res.is_error()) { VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.findValue query from " << dst << ": " << Res.move_as_error(); - finish_query(); + finish_query(dst, false); return; } @@ -210,26 +221,26 @@ void DhtQueryFindValue::on_result(td::Result R, adnl::AdnlNodeI } else if (send_get_nodes) { send_one_query_nodes(dst); } else { - finish_query(); + finish_query(dst); } } void DhtQueryFindValue::on_result_nodes(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find nodes query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto Res = fetch_tl_object(R.move_as_ok(), true); if (Res.is_error()) { VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.findNodes query from " << dst << ": " << 
Res.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto r = Res.move_as_ok(); add_nodes(DhtNodesList{create_tl_object(std::move(r->nodes_)), our_network_id()}); - finish_query(); + finish_query(dst); } void DhtQueryFindValue::finish(DhtNodesList list) { @@ -422,14 +433,14 @@ void DhtQueryRequestReversePing::send_one_query(adnl::AdnlNodeIdShort id) { void DhtQueryRequestReversePing::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed reverse ping query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto Res = fetch_tl_object(R.move_as_ok(), true); if (Res.is_error()) { VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.requestReversePing query from " << dst << ": " << Res.move_as_error(); - finish_query(); + finish_query(dst, false); return; } @@ -441,7 +452,7 @@ void DhtQueryRequestReversePing::on_result(td::Result R, adnl:: }, [&](ton_api::dht_clientNotFound &v) { add_nodes(DhtNodesList{std::move(v.nodes_), our_network_id()}); - finish_query(); + finish_query(dst); })); } diff --git a/dht/dht-query.hpp b/dht/dht-query.hpp index c1db0a0e..e4740361 100644 --- a/dht/dht-query.hpp +++ b/dht/dht-query.hpp @@ -63,11 +63,7 @@ class DhtQuery : public td::actor::Actor { } void send_queries(); void add_nodes(DhtNodesList list); - void finish_query() { - active_queries_--; - CHECK(active_queries_ <= k_); - send_queries(); - } + void finish_query(adnl::AdnlNodeIdShort id, bool success = true); DhtKeyId get_key() const { return key_; } @@ -88,16 +84,22 @@ class DhtQuery : public td::actor::Actor { virtual std::string get_name() const = 0; private: + struct NodeInfo { + DhtNode node; + int failed_attempts = 0; + }; DhtMember::PrintId print_id_; adnl::AdnlNodeIdShort src_; - std::map list_; - std::set pending_ids_; + std::map nodes_; + std::set result_list_, pending_queries_; td::uint32 k_; td::uint32 a_; td::int32 our_network_id_; td::actor::ActorId node_; td::uint32 active_queries_ = 0; + static const int MAX_ATTEMPTS = 1; + protected: td::actor::ActorId adnl_; }; diff --git a/dht/dht.cpp b/dht/dht.cpp index e1e20d45..8d7b02b7 100644 --- a/dht/dht.cpp +++ b/dht/dht.cpp @@ -57,7 +57,7 @@ td::Result> Dht::create(adnl::AdnlNodeIdShort id, std:: for (auto &node : nodes.list()) { auto key = node.get_key(); - td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone()); + td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone(), true); } return std::move(D); } @@ -74,7 +74,7 @@ td::Result> Dht::create_client(adnl::AdnlNodeIdShort id for (auto &node : nodes.list()) { auto key = node.get_key(); - td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone()); + td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone(), true); } return std::move(D); } @@ -368,7 +368,7 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat auto node = N.move_as_ok(); if (node.adnl_id().compute_short_id() == src) { auto key = node.get_key(); - add_full_node_impl(key, std::move(node), true); + add_full_node(key, std::move(node), true); } else { VLOG(DHT_WARNING) << this << ": dropping bad node: unexpected adnl id"; } @@ -398,7 +398,7 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat ton_api::downcast_call(*Q, [&](auto &object) { this->process_query(src, object, std::move(promise)); }); } -void DhtMemberImpl::add_full_node_impl(DhtKeyId key, DhtNode node, 
bool set_active) { +void DhtMemberImpl::add_full_node(DhtKeyId key, DhtNode node, bool set_active) { VLOG(DHT_EXTRA_DEBUG) << this << ": adding full node " << key; auto eid = key ^ key_; @@ -466,7 +466,7 @@ void DhtMemberImpl::set_value(DhtValue value, td::Promise promise) { void DhtMemberImpl::get_value_in(DhtKeyId key, td::Promise result) { auto P = td::PromiseCreator::lambda([key, promise = std::move(result), SelfId = actor_id(this), print_id = print_id(), - adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, a = a_, + adnl = adnl_, list = get_nearest_nodes(key, k_ * 2), k = k_, a = a_, network_id = network_id_, id = id_, client_only = client_only_](td::Result R) mutable { R.ensure(); @@ -485,7 +485,7 @@ void DhtMemberImpl::register_reverse_connection(adnl::AdnlNodeIdFull client, td: auto key_id = get_reverse_connection_key(client_short).compute_key_id(); td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, client_short.pubkey_hash(), register_reverse_connection_to_sign(client_short, id_, ttl), - [=, print_id = print_id(), list = get_nearest_nodes(key_id, k_), SelfId = actor_id(this), + [=, print_id = print_id(), list = get_nearest_nodes(key_id, k_ * 2), SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { TRY_RESULT_PROMISE_PREFIX(promise, signature, std::move(R), "Failed to sign: "); td::actor::send_closure(SelfId, &DhtMemberImpl::get_self_node, @@ -532,7 +532,7 @@ void DhtMemberImpl::request_reverse_ping_cont(adnl::AdnlNode target, td::BufferS } auto key_id = get_reverse_connection_key(client).compute_key_id(); get_self_node([=, target = std::move(target), signature = std::move(signature), promise = std::move(promise), - SelfId = actor_id(this), print_id = print_id(), list = get_nearest_nodes(key_id, k_), + SelfId = actor_id(this), print_id = print_id(), list = get_nearest_nodes(key_id, k_ * 2), client_only = client_only_](td::Result R) mutable { R.ensure(); td::actor::create_actor( @@ -651,8 +651,8 @@ void DhtMemberImpl::check() { DhtKeyId key{x}; auto P = td::PromiseCreator::lambda([key, promise = std::move(promise), SelfId = actor_id(this), - print_id = print_id(), adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, - a = a_, network_id = network_id_, id = id_, + print_id = print_id(), adnl = adnl_, list = get_nearest_nodes(key, k_ * 2), + k = k_, a = a_, network_id = network_id_, id = id_, client_only = client_only_](td::Result R) mutable { R.ensure(); td::actor::create_actor("FindNodesQuery", key, print_id, id, std::move(list), k, a, network_id, @@ -677,8 +677,8 @@ void DhtMemberImpl::send_store(DhtValue value, td::Promise promise) { auto key_id = value.key_id(); auto P = td::PromiseCreator::lambda([value = std::move(value), print_id = print_id(), id = id_, - client_only = client_only_, list = get_nearest_nodes(key_id, k_), k = k_, a = a_, - network_id = network_id_, SelfId = actor_id(this), adnl = adnl_, + client_only = client_only_, list = get_nearest_nodes(key_id, k_ * 2), k = k_, + a = a_, network_id = network_id_, SelfId = actor_id(this), adnl = adnl_, promise = std::move(promise)](td::Result R) mutable { R.ensure(); td::actor::create_actor("StoreQuery", std::move(value), print_id, id, std::move(list), k, a, diff --git a/dht/dht.hpp b/dht/dht.hpp index 0b46d635..9fb05e08 100644 --- a/dht/dht.hpp +++ b/dht/dht.hpp @@ -95,7 +95,7 @@ class DhtMember : public Dht { //virtual void update_addr_list(tl_object_ptr addr_list) = 0; //virtual void add_node(adnl::AdnlNodeIdShort id) = 0; - virtual void add_full_node(DhtKeyId id, DhtNode 
node) = 0; + virtual void add_full_node(DhtKeyId id, DhtNode node, bool set_active) = 0; virtual void receive_ping(DhtKeyId id, DhtNode result) = 0; diff --git a/overlay/overlay-manager.cpp b/overlay/overlay-manager.cpp index 43192190..3c5f5eab 100644 --- a/overlay/overlay-manager.cpp +++ b/overlay/overlay-manager.cpp @@ -93,17 +93,17 @@ void OverlayManager::create_public_overlay(adnl::AdnlNodeIdShort local_id, Overl std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) { create_public_overlay_ex(local_id, std::move(overlay_id), std::move(callback), std::move(rules), std::move(scope), - true); + {}); } void OverlayManager::create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, - td::string scope, bool announce_self) { + td::string scope, OverlayOptions opts) { CHECK(!dht_node_.empty()); auto id = overlay_id.compute_short_id(); register_overlay(local_id, id, Overlay::create(keyring_, adnl_, actor_id(this), dht_node_, local_id, std::move(overlay_id), - std::move(callback), std::move(rules), scope, announce_self)); + std::move(callback), std::move(rules), scope, std::move(opts))); } void OverlayManager::create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, diff --git a/overlay/overlay-manager.h b/overlay/overlay-manager.h index fe1166ac..035ef3e8 100644 --- a/overlay/overlay-manager.h +++ b/overlay/overlay-manager.h @@ -54,7 +54,7 @@ class OverlayManager : public Overlays { std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) override; void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope, - bool announce_self) override; + OverlayOptions opts) override; void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) override; diff --git a/overlay/overlay.cpp b/overlay/overlay.cpp index fcf766fe..af01e045 100644 --- a/overlay/overlay.cpp +++ b/overlay/overlay.cpp @@ -37,10 +37,10 @@ td::actor::ActorOwn Overlay::create(td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool announce_self) { + OverlayPrivacyRules rules, td::string scope, OverlayOptions opts) { auto R = td::actor::create_actor("overlay", keyring, adnl, manager, dht_node, local_id, std::move(overlay_id), true, std::vector(), - std::move(callback), std::move(rules), scope, announce_self); + std::move(callback), std::move(rules), scope, opts); return td::actor::ActorOwn(std::move(R)); } @@ -60,7 +60,7 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool announce_self) + OverlayPrivacyRules rules, td::string scope, OverlayOptions opts) : keyring_(keyring) , adnl_(adnl) , manager_(manager) @@ -71,7 +71,8 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor , public_(pub) , rules_(std::move(rules)) , scope_(scope) - , announce_self_(announce_self) { + , announce_self_(opts.announce_self_) + , frequent_dht_lookup_(opts.frequent_dht_lookup_) { overlay_id_ = id_full_.compute_short_id(); VLOG(OVERLAY_INFO) << this << ": 
creating " << (public_ ? "public" : "private"); @@ -279,13 +280,13 @@ void OverlayImpl::alarm() { send_random_peers(P->get_id(), {}); } } - if (next_dht_query_.is_in_past()) { + if (next_dht_query_ && next_dht_query_.is_in_past()) { + next_dht_query_ = td::Timestamp::never(); auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result res) { td::actor::send_closure(SelfId, &OverlayImpl::receive_dht_nodes, std::move(res), true); }); td::actor::send_closure(dht_node_, &dht::Dht::get_value, dht::DhtKey{overlay_id_.pubkey_hash(), "nodes", 0}, std::move(P)); - next_dht_query_ = td::Timestamp::in(td::Random::fast(60.0, 100.0)); } if (update_db_at_.is_in_past()) { if (peers_.size() > 0) { @@ -333,7 +334,13 @@ void OverlayImpl::receive_dht_nodes(td::Result res, bool dummy) { VLOG(OVERLAY_NOTICE) << this << ": can not get value from DHT: " << res.move_as_error(); } + if (!(next_dht_store_query_ && next_dht_store_query_.is_in_past())) { + finish_dht_query(); + return; + } + next_dht_store_query_ = td::Timestamp::never(); if (!announce_self_) { + finish_dht_query(); return; } @@ -341,6 +348,7 @@ void OverlayImpl::receive_dht_nodes(td::Result res, bool dummy) { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), oid = print_id()](td::Result R) { if (R.is_error()) { LOG(ERROR) << oid << "cannot get self node"; + td::actor::send_closure(SelfId, &OverlayImpl::finish_dht_query); return; } td::actor::send_closure(SelfId, &OverlayImpl::update_dht_nodes, R.move_as_ok()); @@ -365,10 +373,11 @@ void OverlayImpl::update_dht_nodes(OverlayNode node) { static_cast(td::Clocks::system() + 3600), td::BufferSlice()}; value.check().ensure(); - auto P = td::PromiseCreator::lambda([oid = print_id()](td::Result res) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), oid = print_id()](td::Result res) { if (res.is_error()) { VLOG(OVERLAY_NOTICE) << oid << ": error storing to DHT: " << res.move_as_error(); } + td::actor::send_closure(SelfId, &OverlayImpl::finish_dht_query); }); td::actor::send_closure(dht_node_, &dht::Dht::set_value, std::move(value), std::move(P)); diff --git a/overlay/overlay.h b/overlay/overlay.h index a5f7b3a4..da41a247 100644 --- a/overlay/overlay.h +++ b/overlay/overlay.h @@ -42,7 +42,7 @@ class Overlay : public td::actor::Actor { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool announce_self = true); + OverlayPrivacyRules rules, td::string scope, OverlayOptions opts = {}); static td::actor::ActorOwn create(td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId manager, diff --git a/overlay/overlay.hpp b/overlay/overlay.hpp index 86d37d5b..90fcc43d 100644 --- a/overlay/overlay.hpp +++ b/overlay/overlay.hpp @@ -82,12 +82,17 @@ class OverlayPeer { void on_ping_result(bool success) { if (success) { missed_pings_ = 0; + last_ping_at_ = td::Timestamp::now(); + is_alive_ = true; } else { ++missed_pings_; + if (missed_pings_ >= 3 && last_ping_at_.is_in_past(td::Timestamp::in(-15.0))) { + is_alive_ = false; + } } } bool is_alive() const { - return missed_pings_ < 3; + return is_alive_; } td::uint32 throughput_out_bytes = 0; @@ -116,6 +121,8 @@ class OverlayPeer { bool is_neighbour_ = false; size_t missed_pings_ = 0; + bool is_alive_ = true; + td::Timestamp last_ping_at_ = td::Timestamp::now(); }; class OverlayImpl : public Overlay { @@ -124,7 +131,7 @@ class OverlayImpl : public Overlay { td::actor::ActorId 
manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool announce_self = true); + OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", OverlayOptions opts = {}); void update_dht_node(td::actor::ActorId dht) override { dht_node_ = dht; } @@ -295,6 +302,17 @@ class OverlayImpl : public Overlay { void del_peer(adnl::AdnlNodeIdShort id); OverlayPeer *get_random_peer(bool only_alive = false); + void finish_dht_query() { + if (!next_dht_store_query_) { + next_dht_store_query_ = td::Timestamp::in(td::Random::fast(60.0, 100.0)); + } + if (frequent_dht_lookup_ && peers_.size() == bad_peers_.size()) { + next_dht_query_ = td::Timestamp::in(td::Random::fast(6.0, 10.0)); + } else { + next_dht_query_ = next_dht_store_query_; + } + } + td::actor::ActorId keyring_; td::actor::ActorId adnl_; td::actor::ActorId manager_; @@ -305,6 +323,7 @@ class OverlayImpl : public Overlay { td::DecTree peers_; td::Timestamp next_dht_query_ = td::Timestamp::in(1.0); + td::Timestamp next_dht_store_query_ = td::Timestamp::in(1.0); td::Timestamp update_db_at_; td::Timestamp update_throughput_at_; td::Timestamp last_throughput_update_; @@ -367,6 +386,7 @@ class OverlayImpl : public Overlay { OverlayPrivacyRules rules_; td::string scope_; bool announce_self_ = true; + bool frequent_dht_lookup_ = false; std::map> certs_; class CachedEncryptor : public td::ListNode { diff --git a/overlay/overlays.h b/overlay/overlays.h index e12bbbdb..79551e05 100644 --- a/overlay/overlays.h +++ b/overlay/overlays.h @@ -158,6 +158,11 @@ class Certificate { td::SharedSlice signature_; }; +struct OverlayOptions { + bool announce_self_ = true; + bool frequent_dht_lookup_ = false; +}; + class Overlays : public td::actor::Actor { public: class Callback { @@ -197,7 +202,7 @@ class Overlays : public td::actor::Actor { td::string scope) = 0; virtual void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, - td::string scope, bool announce_self) = 0; + td::string scope, OverlayOptions opts) = 0; virtual void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) = 0; diff --git a/rldp-http-proxy/rldp-http-proxy.cpp b/rldp-http-proxy/rldp-http-proxy.cpp index e9186822..0d518d6d 100644 --- a/rldp-http-proxy/rldp-http-proxy.cpp +++ b/rldp-http-proxy/rldp-http-proxy.cpp @@ -54,6 +54,7 @@ #include "git.h" #include "td/utils/BufferedFd.h" #include "common/delay.h" +#include "td/utils/port/path.h" #include "tonlib/tonlib/TonlibClientWrapper.h" #include "DNSResolver.h" @@ -920,6 +921,12 @@ class RldpHttpProxy : public td::actor::Actor { } void run() { + if (!db_root_.empty()) { + td::mkpath(db_root_ + "/").ensure(); + } else if (!is_client_) { + LOG(ERROR) << "DB root is required for server proxy"; + std::_Exit(2); + } keyring_ = ton::keyring::Keyring::create(is_client_ ? 
std::string("") : (db_root_ + "/keyring")); { auto S = load_global_config(); @@ -955,9 +962,16 @@ class RldpHttpProxy : public td::actor::Actor { auto conf_dataR = td::read_file(global_config_); conf_dataR.ensure(); + ton::tl_object_ptr key_store; + if (db_root_.empty()) { + key_store = tonlib_api::make_object(); + } else { + td::mkpath(db_root_ + "/tonlib-cache/").ensure(); + key_store = tonlib_api::make_object(db_root_ + "/tonlib-cache/"); + } auto tonlib_options = tonlib_api::make_object( tonlib_api::make_object(conf_dataR.move_as_ok().as_slice().str(), "", false, false), - tonlib_api::make_object()); + std::move(key_store)); tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); dns_resolver_ = td::actor::create_actor("dnsresolver", tonlib_client_.get()); } diff --git a/storage/PeerActor.cpp b/storage/PeerActor.cpp index 0cb21c0a..48d45626 100644 --- a/storage/PeerActor.cpp +++ b/storage/PeerActor.cpp @@ -25,6 +25,7 @@ #include "td/utils/overloaded.h" #include "td/utils/Random.h" #include "vm/boc.h" +#include "common/delay.h" namespace ton { @@ -119,9 +120,9 @@ void PeerActor::on_get_piece_result(PartId piece_id, td::Result return std::move(res); }(); if (res.is_error()) { - LOG(DEBUG) << "getPiece " << piece_id << "query: " << res.error(); + LOG(DEBUG) << "getPiece " << piece_id << " query: " << res.error(); } else { - LOG(DEBUG) << "getPiece " << piece_id << "query: OK"; + LOG(DEBUG) << "getPiece " << piece_id << " query: OK"; } state_->node_queries_results_.add_element(std::make_pair(piece_id, std::move(res))); notify_node(); @@ -343,11 +344,20 @@ void PeerActor::loop_node_get_piece() { } auto piece_size = std::min(torrent_info_->piece_size, torrent_info_->file_size - part * torrent_info_->piece_size); - td::actor::send_closure(state_->speed_limiters_.download, &SpeedLimiter::enqueue, (double)piece_size, - td::Timestamp::in(3.0), [part, SelfId = actor_id(this)](td::Result R) { - td::actor::send_closure(SelfId, &PeerActor::node_get_piece_query_ready, part, - std::move(R)); - }); + td::Timestamp timeout = td::Timestamp::in(3.0); + td::actor::send_closure( + state_->speed_limiters_.download, &SpeedLimiter::enqueue, (double)piece_size, timeout, + [=, SelfId = actor_id(this)](td::Result R) { + if (R.is_ok()) { + td::actor::send_closure(SelfId, &PeerActor::node_get_piece_query_ready, part, std::move(R)); + } else { + delay_action( + [=, R = std::move(R)]() mutable { + td::actor::send_closure(SelfId, &PeerActor::node_get_piece_query_ready, part, std::move(R)); + }, + timeout); + } + }); } } diff --git a/storage/PeerManager.h b/storage/PeerManager.h index 52297ac5..38d1494b 100644 --- a/storage/PeerManager.h +++ b/storage/PeerManager.h @@ -143,9 +143,11 @@ class PeerManager : public td::actor::Actor { td::actor::ActorId peer_manager_; ton::adnl::AdnlNodeIdShort dst_; }; + ton::overlay::OverlayOptions opts; + opts.announce_self_ = !client_mode_; + opts.frequent_dht_lookup_ = true; send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay_ex, src_id, overlay_id_.clone(), - std::make_unique(actor_id(this), src_id), rules, R"({ "type": "storage" })", - !client_mode_); + std::make_unique(actor_id(this), src_id), rules, R"({ "type": "storage" })", opts); } promise.set_value({}); } diff --git a/storage/SpeedLimiter.cpp b/storage/SpeedLimiter.cpp index 952005fe..704c7402 100644 --- a/storage/SpeedLimiter.cpp +++ b/storage/SpeedLimiter.cpp @@ -16,6 +16,7 @@ */ #include "SpeedLimiter.h" +#include "common/errorcode.h" namespace ton { @@ -41,11 +42,11 @@ void 
SpeedLimiter::enqueue(double size, td::Timestamp timeout, td::Promise( tonlib_api::make_object(r_conf_data.move_as_ok().as_slice().str(), "", false, false), - tonlib_api::make_object()); + tonlib_api::make_object(key_store)); tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); } From 6c615a105a29f272f0f5b6ab5c51d8b10fd54264 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Thu, 28 Dec 2023 17:40:54 +0300 Subject: [PATCH 46/71] Fix generating block header proof (#841) Co-authored-by: SpyCheese --- validator/impl/liteserver.cpp | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/validator/impl/liteserver.cpp b/validator/impl/liteserver.cpp index 86fc690a..34b9f1f1 100644 --- a/validator/impl/liteserver.cpp +++ b/validator/impl/liteserver.cpp @@ -1034,7 +1034,8 @@ bool LiteQuery::make_state_root_proof(Ref& proof, Ref state_ vm::MerkleProofBuilder pb{std::move(block_root)}; block::gen::Block::Record blk; block::gen::BlockInfo::Record info; - if (!(tlb::unpack_cell(pb.root(), blk) && tlb::unpack_cell(blk.info, info))) { + if (!(tlb::unpack_cell(pb.root(), blk) && tlb::unpack_cell(blk.info, info) && + block::gen::BlkPrevInfo(info.after_merge).validate_ref(info.prev_ref))) { return fatal_error("cannot unpack block header"); } vm::CellSlice upd_cs{vm::NoVmSpec(), blk.state_update}; From a52045bd91004bccabf10fd159371ffbbf60ffad Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Thu, 28 Dec 2023 21:54:55 +0300 Subject: [PATCH 47/71] Add special overlay for validators for block broadcasting (#842) * Private overlay for broadcasting blocks --------- Co-authored-by: SpyCheese --- adnl/adnl-peer-table.hpp | 4 + adnl/adnl.h | 2 + tl/generate/scheme/ton_api.tl | 2 + tl/generate/scheme/ton_api.tlo | Bin 86116 -> 86348 bytes ton/ton-types.h | 8 ++ validator/CMakeLists.txt | 4 +- validator/full-node-private-overlay.cpp | 175 ++++++++++++++++++++++++ validator/full-node-private-overlay.hpp | 86 ++++++++++++ validator/full-node.cpp | 51 +++++++ validator/full-node.hpp | 7 + 10 files changed, 338 insertions(+), 1 deletion(-) create mode 100644 validator/full-node-private-overlay.cpp create mode 100644 validator/full-node-private-overlay.hpp diff --git a/adnl/adnl-peer-table.hpp b/adnl/adnl-peer-table.hpp index 2a27a802..1c30b84c 100644 --- a/adnl/adnl-peer-table.hpp +++ b/adnl/adnl-peer-table.hpp @@ -77,6 +77,10 @@ class AdnlPeerTableImpl : public AdnlPeerTable { td::actor::ActorId channel) override; void unregister_channel(AdnlChannelIdShort id) override; + void check_id_exists(AdnlNodeIdShort id, td::Promise promise) override { + promise.set_value(local_ids_.count(id)); + } + void write_new_addr_list_to_db(AdnlNodeIdShort local_id, AdnlNodeIdShort peer_id, AdnlDbItem node, td::Promise promise) override; void get_addr_list_from_db(AdnlNodeIdShort local_id, AdnlNodeIdShort peer_id, diff --git a/adnl/adnl.h b/adnl/adnl.h index b7dad216..a1c39d5e 100644 --- a/adnl/adnl.h +++ b/adnl/adnl.h @@ -97,6 +97,8 @@ class Adnl : public AdnlSenderInterface { virtual void add_id_ex(AdnlNodeIdFull id, AdnlAddressList addr_list, td::uint8 cat, td::uint32 mode) = 0; virtual void del_id(AdnlNodeIdShort id, td::Promise promise) = 0; + virtual void check_id_exists(AdnlNodeIdShort id, td::Promise promise) = 0; + // subscribe to (some) messages(+queries) to this local id virtual void subscribe(AdnlNodeIdShort dst, std::string prefix, std::unique_ptr callback) = 0; virtual void unsubscribe(AdnlNodeIdShort dst, std::string prefix) = 0; diff --git a/tl/generate/scheme/ton_api.tl 
b/tl/generate/scheme/ton_api.tl index 346d9152..7c9d9d5d 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -394,6 +394,8 @@ tonNode.newShardBlockBroadcast block:tonNode.newShardBlock = tonNode.Broadcast; tonNode.shardPublicOverlayId workchain:int shard:long zero_state_file_hash:int256 = tonNode.ShardPublicOverlayId; +tonNode.privateBlockOverlayId zero_state_file_hash:int256 nodes:(vector int256) = tonNode.PrivateBlockOverlayId; + tonNode.keyBlocks blocks:(vector tonNode.blockIdExt) incomplete:Bool error:Bool = tonNode.KeyBlocks; ton.blockId root_cell_hash:int256 file_hash:int256 = ton.BlockId; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index 5b8e1c725e39ceff4097e4a654c796c9f84ff73e..34f31816487195f10af67445b0923d8f5474c3f3 100644 GIT binary patch delta 139 zcmaE|fc4BGR^CUm^{p77;QvNmXI-VF8()^mmgML8<)@_T1r%kLC6=T*<>V)4`mrcA!aEVVg6*M)_#cJoxj?R#}mv=m^|!oUDhbMu&x57Xq<55|)(MA@)_L^eCT HS4ji_gWEXS delta 41 scmX@Ji1o<=R^CUm^{p77;NM1GXWh*`x-Kk?HJdLQZr{7v new_signatures; + for (const BlockSignature& s : signatures) { + new_signatures.emplace_back(s.node, s.signature.clone()); + } + return {block_id, std::move(new_signatures), catchain_seqno, validator_set_hash, data.clone(), proof.clone()}; + } }; struct Ed25519_PrivateKey { diff --git a/validator/CMakeLists.txt b/validator/CMakeLists.txt index 4ecc865c..8de60081 100644 --- a/validator/CMakeLists.txt +++ b/validator/CMakeLists.txt @@ -143,7 +143,9 @@ set(FULL_NODE_SOURCE full-node-master.h full-node-master.hpp full-node-master.cpp - + full-node-private-overlay.hpp + full-node-private-overlay.cpp + net/download-block.hpp net/download-block.cpp net/download-block-new.hpp diff --git a/validator/full-node-private-overlay.cpp b/validator/full-node-private-overlay.cpp new file mode 100644 index 00000000..ea72230b --- /dev/null +++ b/validator/full-node-private-overlay.cpp @@ -0,0 +1,175 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#pragma once + +#include "full-node-private-overlay.hpp" +#include "ton/ton-tl.hpp" +#include "common/delay.h" + +namespace ton { + +namespace validator { + +namespace fullnode { + +void FullNodePrivateOverlay::process_broadcast(PublicKeyHash, ton_api::tonNode_blockBroadcast &query) { + std::vector signatures; + for (auto &sig : query.signatures_) { + signatures.emplace_back(BlockSignature{sig->who_, std::move(sig->signature_)}); + } + + BlockIdExt block_id = create_block_id(query.id_); + BlockBroadcast B{block_id, + std::move(signatures), + static_cast(query.catchain_seqno_), + static_cast(query.validator_set_hash_), + std::move(query.data_), + std::move(query.proof_)}; + + auto P = td::PromiseCreator::lambda([](td::Result R) { + if (R.is_error()) { + if (R.error().code() == ErrorCode::notready) { + LOG(DEBUG) << "dropped broadcast: " << R.move_as_error(); + } else { + LOG(INFO) << "dropped broadcast: " << R.move_as_error(); + } + } + }); + td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::prevalidate_block, std::move(B), + std::move(P)); +} + +void FullNodePrivateOverlay::process_broadcast(PublicKeyHash, ton_api::tonNode_newShardBlockBroadcast &query) { + td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::new_shard_block, + create_block_id(query.block_->block_), query.block_->cc_seqno_, + std::move(query.block_->data_)); +} + +void FullNodePrivateOverlay::receive_broadcast(PublicKeyHash src, td::BufferSlice broadcast) { + auto B = fetch_tl_object(std::move(broadcast), true); + if (B.is_error()) { + return; + } + + ton_api::downcast_call(*B.move_as_ok(), [src, Self = this](auto &obj) { Self->process_broadcast(src, obj); }); +} + +void FullNodePrivateOverlay::send_shard_block_info(BlockIdExt block_id, CatchainSeqno cc_seqno, td::BufferSlice data) { + if (!inited_) { + return; + } + auto B = create_serialize_tl_object( + create_tl_object(create_tl_block_id(block_id), cc_seqno, std::move(data))); + if (B.size() <= overlay::Overlays::max_simple_broadcast_size()) { + td::actor::send_closure(overlays_, &overlay::Overlays::send_broadcast_ex, local_id_, overlay_id_, + local_id_.pubkey_hash(), 0, std::move(B)); + } else { + td::actor::send_closure(overlays_, &overlay::Overlays::send_broadcast_fec_ex, local_id_, overlay_id_, + local_id_.pubkey_hash(), overlay::Overlays::BroadcastFlagAnySender(), std::move(B)); + } +} + +void FullNodePrivateOverlay::send_broadcast(BlockBroadcast broadcast) { + if (!inited_) { + return; + } + std::vector> sigs; + for (auto &sig : broadcast.signatures) { + sigs.emplace_back(create_tl_object(sig.node, sig.signature.clone())); + } + auto B = create_serialize_tl_object( + create_tl_block_id(broadcast.block_id), broadcast.catchain_seqno, broadcast.validator_set_hash, std::move(sigs), + broadcast.proof.clone(), broadcast.data.clone()); + td::actor::send_closure(overlays_, &overlay::Overlays::send_broadcast_fec_ex, local_id_, overlay_id_, + local_id_.pubkey_hash(), overlay::Overlays::BroadcastFlagAnySender(), std::move(B)); +} + +void FullNodePrivateOverlay::start_up() { + std::sort(nodes_.begin(), nodes_.end()); + nodes_.erase(std::unique(nodes_.begin(), nodes_.end()), nodes_.end()); + + std::vector nodes; + for (const adnl::AdnlNodeIdShort &id : nodes_) { + nodes.push_back(id.bits256_value()); + } + auto X = create_hash_tl_object(zero_state_file_hash_, std::move(nodes)); + td::BufferSlice b{32}; + b.as_slice().copy_from(as_slice(X)); + overlay_id_full_ = overlay::OverlayIdFull{std::move(b)}; + overlay_id_ = 
overlay_id_full_.compute_short_id(); + + try_init(); +} + +void FullNodePrivateOverlay::try_init() { + // Sometimes adnl id is added to validator engine later (or not at all) + td::actor::send_closure( + adnl_, &adnl::Adnl::check_id_exists, local_id_, [SelfId = actor_id(this)](td::Result R) { + if (R.is_ok() && R.ok()) { + td::actor::send_closure(SelfId, &FullNodePrivateOverlay::init); + } else { + delay_action([SelfId]() { td::actor::send_closure(SelfId, &FullNodePrivateOverlay::try_init); }, + td::Timestamp::in(30.0)); + } + }); +} + +void FullNodePrivateOverlay::init() { + LOG(FULL_NODE_INFO) << "Creating private block overlay for adnl id " << local_id_ << " : " << nodes_.size() + << " nodes"; + class Callback : public overlay::Overlays::Callback { + public: + void receive_message(adnl::AdnlNodeIdShort src, overlay::OverlayIdShort overlay_id, td::BufferSlice data) override { + } + void receive_query(adnl::AdnlNodeIdShort src, overlay::OverlayIdShort overlay_id, td::BufferSlice data, + td::Promise promise) override { + } + void receive_broadcast(PublicKeyHash src, overlay::OverlayIdShort overlay_id, td::BufferSlice data) override { + td::actor::send_closure(node_, &FullNodePrivateOverlay::receive_broadcast, src, std::move(data)); + } + void check_broadcast(PublicKeyHash src, overlay::OverlayIdShort overlay_id, td::BufferSlice data, + td::Promise promise) override { + } + Callback(td::actor::ActorId node) : node_(node) { + } + + private: + td::actor::ActorId node_; + }; + + overlay::OverlayPrivacyRules rules{overlay::Overlays::max_fec_broadcast_size(), + overlay::CertificateFlags::AllowFec | overlay::CertificateFlags::Trusted, + {}}; + td::actor::send_closure(overlays_, &overlay::Overlays::create_private_overlay, local_id_, overlay_id_full_.clone(), + nodes_, std::make_unique(actor_id(this)), rules); + + td::actor::send_closure(rldp_, &rldp::Rldp::add_id, local_id_); + td::actor::send_closure(rldp2_, &rldp2::Rldp::add_id, local_id_); + inited_ = true; +} + +void FullNodePrivateOverlay::tear_down() { + if (inited_) { + td::actor::send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, local_id_, overlay_id_); + } +} + +} // namespace fullnode + +} // namespace validator + +} // namespace ton diff --git a/validator/full-node-private-overlay.hpp b/validator/full-node-private-overlay.hpp new file mode 100644 index 00000000..6463fda2 --- /dev/null +++ b/validator/full-node-private-overlay.hpp @@ -0,0 +1,86 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#pragma once + +#include "full-node.h" + +namespace ton { + +namespace validator { + +namespace fullnode { + +class FullNodePrivateOverlay : public td::actor::Actor { + public: + void process_broadcast(PublicKeyHash src, ton_api::tonNode_blockBroadcast &query); + void process_broadcast(PublicKeyHash src, ton_api::tonNode_newShardBlockBroadcast &query); + template + void process_broadcast(PublicKeyHash, T &) { + VLOG(FULL_NODE_WARNING) << "dropping unknown broadcast"; + } + void receive_broadcast(PublicKeyHash src, td::BufferSlice query); + + void send_shard_block_info(BlockIdExt block_id, CatchainSeqno cc_seqno, td::BufferSlice data); + void send_broadcast(BlockBroadcast broadcast); + + void start_up() override; + void tear_down() override; + + FullNodePrivateOverlay(adnl::AdnlNodeIdShort local_id, std::vector nodes, + FileHash zero_state_file_hash, FullNodeConfig config, + td::actor::ActorId keyring, td::actor::ActorId adnl, + td::actor::ActorId rldp, td::actor::ActorId rldp2, + td::actor::ActorId overlays, + td::actor::ActorId validator_manager) + : local_id_(local_id) + , nodes_(std::move(nodes)) + , zero_state_file_hash_(zero_state_file_hash) + , config_(config) + , keyring_(keyring) + , adnl_(adnl) + , rldp_(rldp) + , rldp2_(rldp2) + , overlays_(overlays) + , validator_manager_(validator_manager) { + } + + private: + adnl::AdnlNodeIdShort local_id_; + std::vector nodes_; + FileHash zero_state_file_hash_; + FullNodeConfig config_; + + td::actor::ActorId keyring_; + td::actor::ActorId adnl_; + td::actor::ActorId rldp_; + td::actor::ActorId rldp2_; + td::actor::ActorId overlays_; + td::actor::ActorId validator_manager_; + + bool inited_ = false; + overlay::OverlayIdFull overlay_id_full_; + overlay::OverlayIdShort overlay_id_; + + void try_init(); + void init(); +}; + +} // namespace fullnode + +} // namespace validator + +} // namespace ton diff --git a/validator/full-node.cpp b/validator/full-node.cpp index ebba50a0..5a822b26 100644 --- a/validator/full-node.cpp +++ b/validator/full-node.cpp @@ -50,6 +50,7 @@ void FullNodeImpl::add_permanent_key(PublicKeyHash key, td::Promise pr for (auto &shard : shards_) { td::actor::send_closure(shard.second, &FullNodeShard::update_validators, all_validators_, sign_cert_by_); } + create_private_block_overlay(key); promise.set_value(td::Unit()); } @@ -74,6 +75,7 @@ void FullNodeImpl::del_permanent_key(PublicKeyHash key, td::Promise pr for (auto &shard : shards_) { td::actor::send_closure(shard.second, &FullNodeShard::update_validators, all_validators_, sign_cert_by_); } + private_block_overlays_.erase(key); promise.set_value(td::Unit()); } @@ -179,6 +181,10 @@ void FullNodeImpl::send_shard_block_info(BlockIdExt block_id, CatchainSeqno cc_s VLOG(FULL_NODE_WARNING) << "dropping OUT shard block info message to unknown shard"; return; } + if (!private_block_overlays_.empty()) { + td::actor::send_closure(private_block_overlays_.begin()->second, &FullNodePrivateOverlay::send_shard_block_info, + block_id, cc_seqno, data.clone()); + } td::actor::send_closure(shard, &FullNodeShard::send_shard_block_info, block_id, cc_seqno, std::move(data)); } @@ -188,6 +194,10 @@ void FullNodeImpl::send_broadcast(BlockBroadcast broadcast) { VLOG(FULL_NODE_WARNING) << "dropping OUT broadcast to unknown shard"; return; } + if (!private_block_overlays_.empty()) { + td::actor::send_closure(private_block_overlays_.begin()->second, &FullNodePrivateOverlay::send_broadcast, + broadcast.clone()); + } td::actor::send_closure(shard, &FullNodeShard::send_broadcast, 
std::move(broadcast)); } @@ -289,6 +299,7 @@ void FullNodeImpl::got_key_block_proof(td::Ref proof) { PublicKeyHash l = PublicKeyHash::zero(); std::vector keys; + std::map current_validators; for (td::int32 i = -1; i <= 1; i++) { auto r = config->get_total_validator_set(i < 0 ? i : 1 - i); if (r.not_null()) { @@ -299,10 +310,18 @@ void FullNodeImpl::got_key_block_proof(td::Ref proof) { if (local_keys_.count(key)) { l = key; } + if (i == 1) { + current_validators[key] = adnl::AdnlNodeIdShort{el.addr.is_zero() ? key.bits256_value() : el.addr}; + } } } } + if (current_validators != current_validators_) { + current_validators_ = std::move(current_validators); + update_private_block_overlays(); + } + if (keys == all_validators_) { return; } @@ -321,6 +340,7 @@ void FullNodeImpl::got_zero_block_state(td::Ref state) { PublicKeyHash l = PublicKeyHash::zero(); std::vector keys; + std::map current_validators; for (td::int32 i = -1; i <= 1; i++) { auto r = m->get_total_validator_set(i < 0 ? i : 1 - i); if (r.not_null()) { @@ -331,10 +351,18 @@ void FullNodeImpl::got_zero_block_state(td::Ref state) { if (local_keys_.count(key)) { l = key; } + if (i == 1) { + current_validators[key] = adnl::AdnlNodeIdShort{el.addr.is_zero() ? key.bits256_value() : el.addr}; + } } } } + if (current_validators != current_validators_) { + current_validators_ = std::move(current_validators); + update_private_block_overlays(); + } + if (keys == all_validators_) { return; } @@ -456,6 +484,29 @@ void FullNodeImpl::start_up() { std::make_unique(actor_id(this)), std::move(P)); } +void FullNodeImpl::update_private_block_overlays() { + private_block_overlays_.clear(); + if (local_keys_.empty()) { + return; + } + for (const auto &key : local_keys_) { + create_private_block_overlay(key); + } +} + +void FullNodeImpl::create_private_block_overlay(PublicKeyHash key) { + CHECK(local_keys_.count(key)); + if (current_validators_.count(key)) { + std::vector nodes; + for (const auto &p : current_validators_) { + nodes.push_back(p.second); + } + private_block_overlays_[key] = td::actor::create_actor( + "BlocksPrivateOverlay", current_validators_[key], std::move(nodes), zero_state_file_hash_, config_, keyring_, + adnl_, rldp_, rldp2_, overlays_, validator_manager_); + } +} + FullNodeImpl::FullNodeImpl(PublicKeyHash local_id, adnl::AdnlNodeIdShort adnl_id, FileHash zero_state_file_hash, FullNodeConfig config, td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId rldp, diff --git a/validator/full-node.hpp b/validator/full-node.hpp index fc2dd75c..838700b5 100644 --- a/validator/full-node.hpp +++ b/validator/full-node.hpp @@ -23,6 +23,7 @@ //#include "ton-node-slave.h" #include "interfaces/proof.h" #include "interfaces/shard.h" +#include "full-node-private-overlay.hpp" #include #include @@ -111,9 +112,15 @@ class FullNodeImpl : public FullNode { PublicKeyHash sign_cert_by_; std::vector all_validators_; + std::map current_validators_; std::set local_keys_; FullNodeConfig config_; + + std::map> private_block_overlays_; + + void update_private_block_overlays(); + void create_private_block_overlay(PublicKeyHash key); }; } // namespace fullnode From cf83bd18933143da31a37c1e0d1d67f999d5f9ec Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Tue, 9 Jan 2024 15:49:42 +0300 Subject: [PATCH 48/71] Add note on highload wallet pecularities --- crypto/smartcont/highload-wallet-v2-code.fc | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/crypto/smartcont/highload-wallet-v2-code.fc 
b/crypto/smartcont/highload-wallet-v2-code.fc
index 7dd65f9e..b7626bbe 100644
--- a/crypto/smartcont/highload-wallet-v2-code.fc
+++ b/crypto/smartcont/highload-wallet-v2-code.fc
@@ -3,6 +3,22 @@
 ;; this version does not use seqno for replay protection; instead, it remembers all recent query_ids
 ;; in this way several external messages with different query_id can be sent in parallel
+
+;; Note: when dealing with the highload wallet, the following limits need to be checked and taken into account:
+;; 1) Storage size limit. Currently, the size of the contract storage should be less than 65535 cells. If the size of
+;; old_queries grows above this limit, an exception will be thrown in the ActionPhase and the transaction will fail.
+;; A failed transaction may be replayed.
+;; 2) Gas limit. Currently, the gas limit is 1'000'000 gas units, which means there is a limit on how many
+;; old queries can be cleaned in one tx. If the number of expired queries is higher, the contract will get stuck.
+
+;; This means it is not recommended to set an expiration date that is too far in the future:
+;; the number of queries during the expiration timespan should not exceed 1000.
+;; Also, the number of expired queries cleaned in one transaction should be below 100.
+
+;; Such precautions are not easy to follow, so it is recommended to use the highload contract
+;; only when strictly necessary and only when the developer understands the above details.
+
+
 () recv_internal(slice in_msg) impure {
   ;; do nothing for internal messages
 }

From bc7ea2af2da5a35867e726090c423f60f6e51463 Mon Sep 17 00:00:00 2001
From: Andrey Kravchenko
Date: Wed, 10 Jan 2024 16:27:06 +0300
Subject: [PATCH 49/71] Fix passing the selected neighbor to the download proof (#854)

* Fix passing the selected neighbor to the download proof.

* Fix GetNextKeyBlocks as well

Previously, a neighbor was chosen, but it was not passed to the DownloadProof class; as a result, got_download_token always got a random one from the overlay, while after a failure the bad statistics were recorded for the previously selected neighbor, which did not participate in the operation.
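[Editorial note, not part of the patch] The change below simply forwards the neighbour that was already selected into the download actor instead of adnl::AdnlNodeIdShort::zero(), so that success/failure statistics are attributed to the peer that actually served the request. The following minimal, self-contained C++ sketch illustrates that pattern under simplified assumptions; Neighbour, choose_neighbour and download_proof are made-up stand-ins, not the real TON classes:

    #include <functional>
    #include <iostream>
    #include <string>
    #include <vector>

    // Hypothetical stand-ins for the real neighbour list / DownloadProof actor.
    struct Neighbour {
      std::string adnl_id;
      int failed_queries = 0;
    };

    // Picks a peer to query (the real code uses weighted selection; the first element is enough here).
    Neighbour& choose_neighbour(std::vector<Neighbour>& neighbours) {
      return neighbours.front();
    }

    // Simulates a proof download from the given peer; always fails, purely for illustration.
    void download_proof(const std::string& peer_adnl_id, const std::function<void(bool)>& done) {
      std::cout << "querying peer " << peer_adnl_id << "\n";
      done(false);
    }

    int main() {
      std::vector<Neighbour> neighbours{{"peer-A"}, {"peer-B"}};
      Neighbour& chosen = choose_neighbour(neighbours);
      // The point of the fix: hand the *chosen* peer id to the download actor (not a zero placeholder),
      // so that a failure is charged to the peer that actually handled the query.
      download_proof(chosen.adnl_id, [&chosen](bool ok) {
        if (!ok) {
          ++chosen.failed_queries;
        }
      });
      std::cout << "peer-A failed queries: " << neighbours[0].failed_queries << "\n";
      return 0;
    }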
--- validator/full-node-shard.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator/full-node-shard.cpp b/validator/full-node-shard.cpp index 0f495077..7c59a79c 100644 --- a/validator/full-node-shard.cpp +++ b/validator/full-node-shard.cpp @@ -795,7 +795,7 @@ void FullNodeShardImpl::download_block_proof_link(BlockIdExt block_id, td::uint3 td::Promise promise) { auto &b = choose_neighbour(); td::actor::create_actor("downloadproofreq", block_id, true, false, adnl_id_, overlay_id_, - adnl::AdnlNodeIdShort::zero(), priority, timeout, validator_manager_, rldp_, + b.adnl_id, priority, timeout, validator_manager_, rldp_, overlays_, adnl_, client_, create_neighbour_promise(b, std::move(promise))) .release(); } @@ -803,7 +803,7 @@ void FullNodeShardImpl::download_block_proof_link(BlockIdExt block_id, td::uint3 void FullNodeShardImpl::get_next_key_blocks(BlockIdExt block_id, td::Timestamp timeout, td::Promise> promise) { auto &b = choose_neighbour(); - td::actor::create_actor("next", block_id, 16, adnl_id_, overlay_id_, adnl::AdnlNodeIdShort::zero(), + td::actor::create_actor("next", block_id, 16, adnl_id_, overlay_id_, b.adnl_id, 1, timeout, validator_manager_, rldp_, overlays_, adnl_, client_, create_neighbour_promise(b, std::move(promise))) .release(); From 3a5f3fcaddc000a11edf6887fdc03c3f2580450e Mon Sep 17 00:00:00 2001 From: neodix42 Date: Thu, 11 Jan 2024 20:42:57 +0100 Subject: [PATCH 50/71] Update Docker build (#816) * Update create-release.yml minor test * Update Dockerfile * Update Dockerfile * Adjust Docker build for openssl-3 * clone recursively inside the action --- .github/workflows/docker-ubuntu-image.yml | 10 +++++---- docker/Dockerfile => Dockerfile | 27 +++++++++++++---------- 2 files changed, 21 insertions(+), 16 deletions(-) rename docker/Dockerfile => Dockerfile (65%) diff --git a/.github/workflows/docker-ubuntu-image.yml b/.github/workflows/docker-ubuntu-image.yml index ca754078..449711d8 100644 --- a/.github/workflows/docker-ubuntu-image.yml +++ b/.github/workflows/docker-ubuntu-image.yml @@ -1,4 +1,4 @@ -name: Docker Ubuntu 20.04 image +name: Docker Ubuntu 22.04 image on: workflow_dispatch: @@ -12,10 +12,12 @@ env: jobs: build-and-push: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - - name: Checkout + - name: Check out repository uses: actions/checkout@v3 + with: + submodules: 'recursive' - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -35,5 +37,5 @@ jobs: uses: docker/build-push-action@v2 with: push: true - context: ./docker + context: ./ tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest diff --git a/docker/Dockerfile b/Dockerfile similarity index 65% rename from docker/Dockerfile rename to Dockerfile index 595b4d87..e3cd7b26 100644 --- a/docker/Dockerfile +++ b/Dockerfile @@ -1,26 +1,29 @@ -FROM ubuntu:20.04 as builder +FROM ubuntu:22.04 as builder RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang-6.0 openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev pkg-config && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev pkg-config autoconf automake libtool && \ rm -rf /var/lib/apt/lists/* -ENV CC clang-6.0 -ENV CXX clang++-6.0 +ENV CC clang +ENV CXX clang++ ENV CCACHE_DISABLE 1 + WORKDIR / -RUN git clone --recursive https://github.com/ton-blockchain/ton +RUN mkdir ton WORKDIR /ton 
+COPY ./ ./ + RUN mkdir build && \ cd build && \ - cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DCMAKE_CXX_FLAGS="-mavx2" .. && \ + cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. && \ ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client -FROM ubuntu:20.04 +FROM ubuntu:22.04 RUN apt-get update && \ - apt-get install -y openssl wget libatomic1 && \ - rm -rf /var/lib/apt/lists/* + apt-get install -y wget libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev && \ + rm -rf /var/lib/apt/lists/* RUN mkdir -p /var/ton-work/db && \ - mkdir -p /var/ton-work/db/static + mkdir -p /var/ton-work/db/static COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon /usr/local/bin/ COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon-cli /usr/local/bin/ @@ -30,7 +33,7 @@ COPY --from=builder /ton/build/validator-engine-console/validator-engine-console COPY --from=builder /ton/build/utils/generate-random-id /usr/local/bin/ WORKDIR /var/ton-work/db -COPY init.sh control.template ./ +COPY ./docker/init.sh ./docker/control.template ./ RUN chmod +x init.sh -ENTRYPOINT ["/var/ton-work/db/init.sh"] +ENTRYPOINT ["/var/ton-work/db/init.sh"] \ No newline at end of file From ff40c1f2a0584408c45b2972e1a0ba99398af2f5 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 12 Jan 2024 12:34:28 +0300 Subject: [PATCH 51/71] Do not count gas on special accounts in block gas limits (enabled by config) (#856) * Set higher gas limit for special accounts, don't add gas from special accounts to block total * Make removing special accounts from block gas limits enabled by config --------- Co-authored-by: SpyCheese --- crypto/block/block.tlb | 5 +++++ crypto/block/mc-config.cpp | 7 ++++++- crypto/block/mc-config.h | 1 + crypto/block/transaction.cpp | 10 +++++++--- crypto/block/transaction.h | 10 +++------- validator/impl/collator.cpp | 6 ++++-- validator/impl/validate-query.cpp | 18 ++++++++++-------- 7 files changed, 36 insertions(+), 21 deletions(-) diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index 4b36f13b..eb1f8b94 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -696,6 +696,11 @@ gas_prices_ext#de gas_price:uint64 gas_limit:uint64 special_gas_limit:uint64 gas block_gas_limit:uint64 freeze_due_limit:uint64 delete_due_limit:uint64 = GasLimitsPrices; +// same fields as gas_prices_ext; behavior differs +gas_prices_v3#df gas_price:uint64 gas_limit:uint64 special_gas_limit:uint64 gas_credit:uint64 + block_gas_limit:uint64 freeze_due_limit:uint64 delete_due_limit:uint64 + = GasLimitsPrices; + gas_flat_pfx#d1 flat_gas_limit:uint64 flat_gas_price:uint64 other:GasLimitsPrices = GasLimitsPrices; diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 08be5c88..51402142 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -654,11 +654,16 @@ td::Result Config::do_get_gas_limits_prices(td::Ref c res.delete_due_limit = r.delete_due_limit; }; block::gen::GasLimitsPrices::Record_gas_prices_ext rec; + block::gen::GasLimitsPrices::Record_gas_prices_v3 rec_v3; + vm::CellSlice cs0 = cs; if (tlb::unpack(cs, rec)) { f(rec, rec.special_gas_limit); + } else if (tlb::unpack(cs = cs0, rec_v3)) { + f(rec_v3, rec_v3.special_gas_limit); + res.special_full_limit = true; } else { block::gen::GasLimitsPrices::Record_gas_prices rec0; - if (tlb::unpack(cs, rec0)) { + if (tlb::unpack(cs = cs0, rec0)) { f(rec0, 
rec0.gas_limit); } else { return td::Status::Error(PSLICE() << "configuration parameter " << id diff --git a/crypto/block/mc-config.h b/crypto/block/mc-config.h index caab93f3..dcc48b4b 100644 --- a/crypto/block/mc-config.h +++ b/crypto/block/mc-config.h @@ -349,6 +349,7 @@ struct GasLimitsPrices { td::uint64 block_gas_limit{0}; td::uint64 freeze_due_limit{0}; td::uint64 delete_due_limit{0}; + bool special_full_limit{false}; td::RefInt256 compute_gas_price(td::uint64 gas_used) const; }; diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 7a907337..5955dd6e 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1039,8 +1039,12 @@ bool ComputePhaseConfig::parse_GasLimitsPrices_internal(Ref cs, t delete_due_limit = td::make_refint(r.delete_due_limit); }; block::gen::GasLimitsPrices::Record_gas_prices_ext rec; + block::gen::GasLimitsPrices::Record_gas_prices_v3 rec_v3; if (tlb::csr_unpack(cs, rec)) { f(rec, rec.special_gas_limit); + } else if (tlb::csr_unpack(cs, rec_v3)) { + f(rec_v3, rec_v3.special_gas_limit); + special_gas_full = true; } else { block::gen::GasLimitsPrices::Record_gas_prices rec0; if (tlb::csr_unpack(std::move(cs), rec0)) { @@ -1142,7 +1146,7 @@ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& cp.gas_max = cfg.gas_bought_for(balance.grams); } cp.gas_credit = 0; - if (trans_type != tr_ord) { + if (trans_type != tr_ord || (account.is_special && cfg.special_gas_full)) { // may use all gas that can be bought using remaining balance cp.gas_limit = cp.gas_max; } else { @@ -3203,8 +3207,8 @@ td::Result Transaction::estimate_block_storage_pro * * @returns True if the limits were successfully updated, False otherwise. */ -bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) const { - if (!(blimst.update_lt(end_lt) && blimst.update_gas(gas_used()))) { +bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_gas, bool with_size) const { + if (!(blimst.update_lt(end_lt) && blimst.update_gas(with_gas ? 
gas_used() : 0))) { return false; } if (with_size) { diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index d7cb95d1..deb71a40 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -104,6 +104,7 @@ struct ComputePhaseConfig { td::uint64 gas_credit; td::uint64 flat_gas_limit = 0; td::uint64 flat_gas_price = 0; + bool special_gas_full = false; static constexpr td::uint64 gas_infty = (1ULL << 63) - 1; td::RefInt256 gas_price256; td::RefInt256 max_gas_threshold; @@ -119,12 +120,7 @@ struct ComputePhaseConfig { SizeLimitsConfig size_limits; int vm_log_verbosity = 0; - ComputePhaseConfig(td::uint64 _gas_price = 0, td::uint64 _gas_limit = 0, td::uint64 _gas_credit = 0) - : gas_price(_gas_price), gas_limit(_gas_limit), special_gas_limit(_gas_limit), gas_credit(_gas_credit) { - compute_threshold(); - } - ComputePhaseConfig(td::uint64 _gas_price, td::uint64 _gas_limit, td::uint64 _spec_gas_limit, td::uint64 _gas_credit) - : gas_price(_gas_price), gas_limit(_gas_limit), special_gas_limit(_spec_gas_limit), gas_credit(_gas_credit) { + ComputePhaseConfig() : gas_price(0), gas_limit(0), special_gas_limit(0), gas_credit(0) { compute_threshold(); } void compute_threshold(); @@ -383,7 +379,7 @@ struct Transaction { td::Result estimate_block_storage_profile_incr( const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; - bool update_limits(block::BlockLimitStatus& blk_lim_st, bool with_size = true) const; + bool update_limits(block::BlockLimitStatus& blk_lim_st, bool with_gas = true, bool with_size = true) const; Ref commit(Account& _account); // _account should point to the same account LtCellRef extract_out_msg(unsigned i); diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 1d277048..1b7991f6 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -2667,7 +2667,8 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t return fatal_error(td::Status::Error( -666, std::string{"cannot serialize new transaction for smart contract "} + smc_addr.to_hex())); } - if (!trans->update_limits(*block_limit_status_)) { + if (!trans->update_limits(*block_limit_status_, + /* with_gas = */ !(acc->is_special && compute_phase_cfg_.special_gas_full))) { return fatal_error(-666, "cannot update block limit status to include the new transaction"); } if (trans->commit(*acc).is_null()) { @@ -2744,7 +2745,8 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { } std::unique_ptr trans = res.move_as_ok(); - if (!trans->update_limits(*block_limit_status_)) { + if (!trans->update_limits(*block_limit_status_, + /* with_gas = */ !(acc->is_special && compute_phase_cfg_.special_gas_full))) { fatal_error("cannot update block limit status to include the new transaction"); return {}; } diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index d5f489a3..0a22fa14 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -4904,13 +4904,6 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT int trans_type = block::transaction::Transaction::tr_none; switch (tag) { case block::gen::TransactionDescr::trans_ord: { - if (!block_limit_status_->fits(block::ParamLimits::cl_medium)) { - return reject_query(PSTRING() << "cannod add ordinary transaction because hard block limits are exceeded: " - << "gas_used=" << block_limit_status_->gas_used - << "(limit=" << block_limits_->gas.hard() << "), " - << "lt_delta=" << 
block_limit_status_->cur_lt - block_limits_->start_lt - << "(limit=" << block_limits_->lt_delta.hard() << ")"); - } trans_type = block::transaction::Transaction::tr_ord; if (in_msg_root.is_null()) { return reject_query(PSTRING() << "ordinary transaction " << lt << " of account " << addr.to_hex() @@ -5058,10 +5051,19 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT return reject_query(PSTRING() << "cannot re-create the serialization of transaction " << lt << " for smart contract " << addr.to_hex()); } - if (!trs->update_limits(*block_limit_status_, false)) { + if (!trs->update_limits(*block_limit_status_, + /* with_gas = */ !account.is_special, + /* with_size = */ false)) { return fatal_error(PSTRING() << "cannot update block limit status to include transaction " << lt << " of account " << addr.to_hex()); } + if (block_limit_status_->gas_used > block_limits_->gas.hard() + compute_phase_cfg_.gas_limit) { + // Note that block_limit_status_->gas_used does not include transactions in special accounts + return reject_query(PSTRING() << "gas block limits are exceeded: total_gas_used > gas_limit_hard + trx_gas_limit (" + << "total_gas_used=" << block_limit_status_->gas_used + << ", gas_limit_hard=" << block_limits_->gas.hard() + << ", trx_gas_limit=" << compute_phase_cfg_.gas_limit << ")"); + } auto trans_root2 = trs->commit(account); if (trans_root2.is_null()) { return reject_query(PSTRING() << "the re-created transaction " << lt << " for smart contract " << addr.to_hex() From 062b7b4a92dd67e32d963cf3f04b8bc97d8b7ed5 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 15 Jan 2024 11:39:43 +0300 Subject: [PATCH 52/71] Make 2023.12 release (#857) * Update recent_changelog.md for 2023.12 * Update Changelog.md for 2023.12 --- Changelog.md | 13 ++++++++++++- recent_changelog.md | 16 ++++++++-------- 2 files changed, 20 insertions(+), 9 deletions(-) diff --git a/Changelog.md b/Changelog.md index 440866c2..3310d6a2 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,4 +1,15 @@ -##2023.11 Update +## 2023.12 Update + +1. Optimized message queue handling, now queue cleaning speed doesn't depend on total queue size + * Cleaning delivered messages using lt augmentation instead of random search / consequtive walk + * Keeping root cell of queue message in memory until outdated (caching) +2. Changes to block collation/validation limits +3. Stop accepting new external message if message queue is overloaded +4. Introducing conditions for shard split/merge based on queue size + +Read [more](https://blog.ton.org/technical-report-december-5-inscriptions-launch-on-ton) on that update. + +## 2023.11 Update 1. New TVM Functionality. (Disabled by default) 2. A series of emulator improvements: libraries support, higher max stack size, etc diff --git a/recent_changelog.md b/recent_changelog.md index 5de7aed7..5e122454 100644 --- a/recent_changelog.md +++ b/recent_changelog.md @@ -1,10 +1,10 @@ -##2023.11 Update +## 2023.12 Update -1. New TVM Functionality. (Disabled by default) -2. A series of emulator improvements: libraries support, higher max stack size, etc -3. A series of tonlib and tonlib-cli improvements: wallet-v4 support, getconfig, showtransactions, etc -4. Changes to public libraries: now contract can not publish more than 256 libraries (config parameter) and contracts can not be deployed with public libraries in initstate (instead contracts need explicitly publish all libraries) -5. 
Changes to storage due payment: now due payment is collected in Storage Phase, however for bouncable messages fee amount can not exceed balance of account prior to message. +1. Optimized message queue handling, now queue cleaning speed doesn't depend on total queue size + * Cleaning delivered messages using lt augmentation instead of random search / consequtive walk + * Keeping root cell of queue message in memory until outdated (caching) +2. Changes to block collation/validation limits +3. Stop accepting new external message if message queue is overloaded +4. Introducing conditions for shard split/merge based on queue size - -Besides the work of the core team, this update is based on the efforts of @aleksej-paschenko (emulator improvements), @akifoq (security improvements), Trail of Bits auditor as well as all participants of [TEP-88 discussion](https://github.com/ton-blockchain/TEPs/pull/88). +Read [more](https://blog.ton.org/technical-report-december-5-inscriptions-launch-on-ton) on that update. From 388c8a6d862da5df6e4a89fad135becb45d283ca Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Mon, 15 Jan 2024 23:43:11 +0300 Subject: [PATCH 53/71] Increase gas limit for a specific wallet (enabled by config) (#859) --- crypto/block/transaction.cpp | 94 ++++++++++++++++++++++++++++--- crypto/block/transaction.h | 3 + validator/impl/validate-query.cpp | 7 ++- 3 files changed, 95 insertions(+), 9 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 5955dd6e..b4bd648b 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1081,6 +1081,25 @@ bool ComputePhaseConfig::is_address_suspended(ton::WorkchainId wc, td::Bits256 a } } +/** + * Computes the maximum gas fee based on the gas prices and limits. + * + * @param gas_price256 The gas price from config as RefInt256 + * @param gas_limit The gas limit from config + * @param flat_gas_limit The flat gas limit from config + * @param flat_gas_price The flat gas price from config + * + * @returns The maximum gas fee. + */ +static td::RefInt256 compute_max_gas_threshold(const td::RefInt256& gas_price256, td::uint64 gas_limit, + td::uint64 flat_gas_limit, td::uint64 flat_gas_price) { + if (gas_limit > flat_gas_limit) { + return td::rshift(gas_price256 * (gas_limit - flat_gas_limit), 16, 1) + td::make_bigint(flat_gas_price); + } else { + return td::make_refint(flat_gas_price); + } +} + /** * Computes the maximum for gas fee based on the gas prices and limits. * @@ -1088,12 +1107,7 @@ bool ComputePhaseConfig::is_address_suspended(ton::WorkchainId wc, td::Bits256 a */ void ComputePhaseConfig::compute_threshold() { gas_price256 = td::make_refint(gas_price); - if (gas_limit > flat_gas_limit) { - max_gas_threshold = - td::rshift(gas_price256 * (gas_limit - flat_gas_limit), 16, 1) + td::make_bigint(flat_gas_price); - } else { - max_gas_threshold = td::make_refint(flat_gas_price); - } + max_gas_threshold = compute_max_gas_threshold(gas_price256, gas_limit, flat_gas_limit, flat_gas_price); } /** @@ -1130,6 +1144,67 @@ td::RefInt256 ComputePhaseConfig::compute_gas_price(td::uint64 gas_used) const { } namespace transaction { + +/** + * Checks if it is required to increase gas_limit (from GasLimitsPrices config) to special_gas_limit * 2 + * from masterchain GasLimitsPrices config for the transaction. + * + * In January 2024 a highload wallet of @wallet Telegram bot in mainnet was stuck because current gas limit (1M) is + * not enough to clean up old queires, thus locking funds inside. 
+ * See comment in crypto/smartcont/highload-wallet-v2-code.fc for details on why this happened. + * Account address: EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu + * It was proposed to validators to increase gas limit for this account for a limited amount of time (until 2024-02-16). + * It is activated by setting gas_prices_v3 in ConfigParam 20 (config_mc_gas_prices). + * This config change also activates new behavior for special accounts in masterchain. + * + * @param cfg The compute phase configuration. + * @param now The Unix time of the transaction. + * @param account The account of the transaction. + * + * @returns True if gas_limit override is required, false otherwise + */ +static bool override_gas_limit(const ComputePhaseConfig& cfg, ton::UnixTime now, const Account& account) { + if (!cfg.mc_gas_prices.special_full_limit) { + return false; + } + ton::UnixTime until = 1708041600; // 2024-02-16 00:00:00 UTC + ton::WorkchainId wc = 0; + const char* addr_hex = "FFBFD8F5AE5B2E1C7C3614885CB02145483DFAEE575F0DD08A72C366369211CD"; + return now < until && account.workchain == wc && account.addr.to_hex() == addr_hex; +} + +/** + * Computes the amount of gas that can be bought for a given amount of nanograms. + * Usually equal to `cfg.gas_bought_for(nanograms)` + * However, it overrides gas_limit from config in special cases. + * + * @param cfg The compute phase configuration. + * @param nanograms The amount of nanograms to compute gas for. + * + * @returns The amount of gas. + */ +td::uint64 Transaction::gas_bought_for(const ComputePhaseConfig& cfg, td::RefInt256 nanograms) { + if (override_gas_limit(cfg, now, account)) { + gas_limit_overridden = true; + // Same as ComputePhaseConfig::gas_bought for, but with other gas_limit and max_gas_threshold + auto gas_limit = cfg.mc_gas_prices.special_gas_limit * 2; + auto max_gas_threshold = + compute_max_gas_threshold(cfg.gas_price256, gas_limit, cfg.flat_gas_limit, cfg.flat_gas_price); + if (nanograms.is_null() || sgn(nanograms) < 0) { + return 0; + } + if (nanograms >= max_gas_threshold) { + return gas_limit; + } + if (nanograms < cfg.flat_gas_price) { + return 0; + } + auto res = td::div((std::move(nanograms) - cfg.flat_gas_price) << 16, cfg.gas_price256); + return res->to_long() + cfg.flat_gas_limit; + } + return cfg.gas_bought_for(nanograms); +} + /** * Computes the gas limits for a transaction. 
* @@ -1143,7 +1218,7 @@ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& if (account.is_special) { cp.gas_max = cfg.special_gas_limit; } else { - cp.gas_max = cfg.gas_bought_for(balance.grams); + cp.gas_max = gas_bought_for(cfg, balance.grams); } cp.gas_credit = 0; if (trans_type != tr_ord || (account.is_special && cfg.special_gas_full)) { @@ -1152,7 +1227,7 @@ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& } else { // originally use only gas bought using remaining message balance // if the message is "accepted" by the smart contract, the gas limit will be set to gas_max - cp.gas_limit = std::min(cfg.gas_bought_for(msg_balance_remaining.grams), cp.gas_max); + cp.gas_limit = std::min(gas_bought_for(cfg, msg_balance_remaining.grams), cp.gas_max); if (!block::tlb::t_Message.is_internal(in_msg)) { // external messages carry no balance, give them some credit to check whether they are accepted cp.gas_credit = std::min(cfg.gas_credit, cp.gas_max); @@ -3454,6 +3529,9 @@ td::Status FetchConfigParams::fetch_config_params( storage_phase_cfg->delete_due_limit)) { return td::Status::Error(-668, "cannot unpack current gas prices and limits from masterchain configuration"); } + TRY_RESULT_PREFIX(mc_gas_prices, config.get_gas_limits_prices(true), + "cannot unpack masterchain gas prices and limits: "); + compute_phase_cfg->mc_gas_prices = std::move(mc_gas_prices); storage_phase_cfg->enable_due_payment = config.get_global_version() >= 4; compute_phase_cfg->block_rand_seed = *rand_seed; compute_phase_cfg->max_vm_data_depth = size_limits.max_vm_data_depth; diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index deb71a40..7539efe0 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -105,6 +105,7 @@ struct ComputePhaseConfig { td::uint64 flat_gas_limit = 0; td::uint64 flat_gas_price = 0; bool special_gas_full = false; + block::GasLimitsPrices mc_gas_prices; static constexpr td::uint64 gas_infty = (1ULL << 63) - 1; td::RefInt256 gas_price256; td::RefInt256 max_gas_threshold; @@ -358,12 +359,14 @@ struct Transaction { std::unique_ptr action_phase; std::unique_ptr bounce_phase; vm::CellStorageStat new_storage_stat; + bool gas_limit_overridden{false}; Transaction(const Account& _account, int ttype, ton::LogicalTime req_start_lt, ton::UnixTime _now, Ref _inmsg = {}); bool unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* cfg); bool check_in_msg_state_hash(); bool prepare_storage_phase(const StoragePhaseConfig& cfg, bool force_collect = true, bool adjust_msg_value = false); bool prepare_credit_phase(); + td::uint64 gas_bought_for(const ComputePhaseConfig& cfg, td::RefInt256 nanograms); bool compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& cfg); Ref prepare_vm_stack(ComputePhase& cp); std::vector> compute_vm_libraries(const ComputePhaseConfig& cfg); diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 0a22fa14..94eded6c 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -940,6 +940,11 @@ bool ValidateQuery::fetch_config_params() { storage_phase_cfg_.delete_due_limit)) { return fatal_error("cannot unpack current gas prices and limits from masterchain configuration"); } + auto mc_gas_prices = config_->get_gas_limits_prices(true); + if (mc_gas_prices.is_error()) { + return fatal_error(mc_gas_prices.move_as_error_prefix("cannot unpack masterchain gas prices and limits: ")); + } + compute_phase_cfg_.mc_gas_prices = 
mc_gas_prices.move_as_ok(); storage_phase_cfg_.enable_due_payment = config_->get_global_version() >= 4; compute_phase_cfg_.block_rand_seed = rand_seed_; compute_phase_cfg_.libraries = std::make_unique(config_->get_libraries_root(), 256); @@ -5052,7 +5057,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " for smart contract " << addr.to_hex()); } if (!trs->update_limits(*block_limit_status_, - /* with_gas = */ !account.is_special, + /* with_gas = */ !account.is_special && !trs->gas_limit_overridden, /* with_size = */ false)) { return fatal_error(PSTRING() << "cannot update block limit status to include transaction " << lt << " of account " << addr.to_hex()); From e0a320f715a50a37ce177592e324e6044440d033 Mon Sep 17 00:00:00 2001 From: neodix42 Date: Mon, 15 Jan 2024 21:48:04 +0100 Subject: [PATCH 54/71] Improve TON build scripts and some tests (#855) * fix macOS github actions * fix android tonlib GH action; * fixing wasm GH action * strip binaries * fix randomly failing ubuntu and wasm GH actions * fix randomly failing ubuntu and wasm GH actions * revert some changes * adding more nix scripts and automated native build scripts; debug static ton compilation * minor fix * do not use pkg_config if path specified * move wasm script, run with sudo action script * weird, fixing - cp: missing destination file operand after 'assembly/native/build-ubuntu-20.04-shared.sh' * weird, fixing - cp: missing destination file operand after 'assembly/native/build-ubuntu-20.04-shared.sh' * ok * some adjustments for android and win builds * some adjustments for android and win builds * moving stripping inside the build script * access rights handling; adding simple binaries' tests * make lite-client-common, fift-lib and smc-envelope deliberately static; add -a (artifacts) flag to build scripts; * minor wasm build fix; create separate tonlib android build script; remove outdated __has_trivial_copy(T) * add windows build - WIP * adjust android build; improve win build; * adjust sodium paths for android build; use proper compiler for windows build; * add github windows build auxiliary file * adjust wasm build * add portable ubuntu build * exclude some unstable tests for some time * compile portable binaries on ubuntu-20.04 * exclude some unstable tests * include static gsl * restart builds * restart builds * restart builds * remove libreadline, gsl and blas dependencies in linux build * add macos build script * install missing autoconf in macos builds * enable all tests and see what fails * enable win tests and restart others * enable win tests and fix test-smartcont.cpp * enable win tests * use clang-16 on mac builds, add blockchain-explorer for ubuntu builds, add portable macos build * move sudo part outside a build scripts * move sudo part outside a build scripts * run llvm install with sudo * remove libgnutls28-dev before ubuntu static compilation, include blockchain-explorer into artifacts; remove warning: definition of implicit copy constructor for 'Stat' is deprecated because it has a user-declared copy assignment operator [-Wdeprecated-copy] * rework wrong decision, put back system gnutls, but compile libmicrohttpd with --disable-https * add jenkins pipeline sceleton * WIP jenkins pipeline sceleton * WIP jenkins pipeline changes * WIP jenkins pipeline: add stage timout, zip and group artifacts * WIP jenkins pipeline: macos portable build fix * WIP jenkins pipeline: wording * WIP jenkins pipeline: add android tonlib * WIP jenkins pipeline: add was binaries * WIP 
jenkins pipeline: add TOTAL_MEMORY 1.5gb to funcfiftlib wasm linking * WIP jenkins pipeline: add nix build on linux aarch64 * WIP jenkins pipeline: funcfiftlib compilation fails that 16mb mem is not enough, increase to 32mb * WIP jenkins pipeline: enable test in nix build * WIP jenkins pipeline: add linux x86-64 nix build * WIP jenkins pipeline: include libs in nix build * WIP jenkins pipeline: include libs in nix build * WIP jenkins pipeline: include mac nix build * WIP jenkins pipeline: include mac nix build * WIP jenkins pipeline: include mac nix build * WIP jenkins pipeline: include mac nix build * WIP jenkins pipeline: include mac nix build * WIP jenkins pipeline: include mac nix build * WIP jenkins pipeline: nix linux arm64 with openssl 1.1 for now * WIP jenkins pipeline: working ubuntu arm64 libtonjson * WIP jenkins pipeline: working ubuntu arm64 libtonjson + minor fix * WIP jenkins pipeline: working ubuntu arm64 libtonjson + minor fix 2 * WIP jenkins pipeline: merry christmas * WIP jenkins pipeline: merry christmas 2 * WIP jenkins pipeline: remove native static builds * WIP jenkins pipeline: enable more tests * WIP jenkins pipeline: zip artifacts better * WIP jenkins pipeline: get rid of path in the final zip * WIP jenkins pipeline: minor fix, include lib and smartcont folders * WIP jenkins pipeline: minor fix, include lib and smartcont folders into nix artifacts also * WIP jenkins pipeline: minor fix * WIP jenkins pipeline: minor fix * adjust github actions for new nix builds * cleanup * cleanup * cleanup * cleanup * rename libtonlibjson.so.0.5 to libtonlibjson.so * Add TON build instructions to README.md * simplify * fix test-tonlib-offline * set timeout per test of 300 sec * set timeout per test of 600 sec for non nix builds * increase test timeout to 900 sec; minor changes * use MS VS 2022 for win TON compilation; update README.md * use MS VS 2022 for win TON compilation; update README.md * change path to MSVC in github workflow * change path to MSVC in groovy pipeline * compile ton on win, with msvc 2022 community and enterprise versions * minor fixes * improve network tests * remove TON compilation against macos-11 github runner * add `choco feature enable -n allowEmptyChecksums` since pkg-config-lite-0.28-1 does not have a checksum * abort win compilation if 3pp can't be downloaded * increase test timeout to 30 min * improving test-catchain --- .../build-ton-linux-android-tonlib.yml | 32 +++ .../build-ton-linux-x86-64-shared.yml | 40 +++ .../build-ton-macos-x86-64-shared.yml | 25 ++ .../workflows/build-ton-wasm-emscripten.yml | 30 +++ .github/workflows/create-release.yml | 6 +- .github/workflows/macos-11.7-compile.yml | 106 -------- .github/workflows/macos-12.6-compile.yml | 106 -------- .github/workflows/ton-aarch64-linux.yml | 50 ---- .github/workflows/ton-aarch64-macos.yml | 47 ---- .github/workflows/ton-ccpcheck.yml | 3 +- .github/workflows/ton-wasm-emscripten.yml | 46 ---- .github/workflows/ton-x86-64-linux.yml | 18 +- .github/workflows/ton-x86-64-macos.yml | 18 +- .github/workflows/ton-x86-64-windows.yml | 34 +++ .github/workflows/tonlib-android-jni.yml | 61 ----- .github/workflows/ubuntu-22.04-compile.yml | 78 ------ .github/workflows/ubuntu-compile.yml | 81 ------ .github/workflows/win-2019-compile.yml | 108 -------- .gitignore | 12 +- CMake/FindMHD.cmake | 29 ++- CMake/FindSecp256k1.cmake | 27 +- CMake/FindSodium.cmake | 1 + CMakeLists.txt | 19 +- README.md | 88 ++++++- assembly/android/build-android-tonlib.sh | 55 ++++ assembly/cicd/jenkins/test-builds.groovy | 236 
++++++++++++++++++ assembly/native/build-macos-portable.sh | 207 +++++++++++++++ assembly/native/build-macos-shared.sh | 136 ++++++++++ assembly/native/build-ubuntu-portable.sh | 198 +++++++++++++++ assembly/native/build-ubuntu-shared.sh | 122 +++++++++ assembly/native/build-windows-github.bat | 2 + assembly/native/build-windows.bat | 193 ++++++++++++++ assembly/nix/build-linux-arm64-nix.sh | 20 ++ assembly/nix/build-linux-x86-64-nix.sh | 20 ++ assembly/nix/build-macos-nix.sh | 17 ++ flake.lock => assembly/nix/flakes/flake.lock | 0 flake.nix => assembly/nix/flakes/flake.nix | 0 shell.nix => assembly/nix/flakes/shell.nix | 0 assembly/nix/linux-arm64-static.nix | 45 ++++ assembly/nix/linux-arm64-tonlib.nix | 44 ++++ assembly/nix/linux-x86-64-static.nix | 45 ++++ assembly/nix/linux-x86-64-tonlib.nix | 54 ++++ assembly/nix/macos-static.nix | 65 +++++ assembly/nix/macos-tonlib.nix | 55 ++++ assembly/nix/microhttpd.nix | 28 +++ assembly/nix/openssl.nix | 30 +++ .../wasm}/fift-func-wasm-build-ubuntu.sh | 53 +++- blockchain-explorer/CMakeLists.txt | 27 +- catchain/catchain-receiver.cpp | 4 +- crypto/CMakeLists.txt | 24 +- crypto/test/test-smartcont.cpp | 5 +- crypto/vm/boc.h | 1 + example/android/README.md | 24 +- example/android/build-all.sh | 2 + example/android/build.sh | 22 +- lite-client/CMakeLists.txt | 2 +- tdnet/td/net/TcpListener.cpp | 8 +- tdnet/test/net-test.cpp | 6 +- tdutils/td/utils/as.h | 5 - test/test-catchain.cpp | 9 +- tonlib/test/offline.cpp | 19 +- tonlib/tonlib/TonlibClient.cpp | 2 + 62 files changed, 2019 insertions(+), 831 deletions(-) create mode 100644 .github/workflows/build-ton-linux-android-tonlib.yml create mode 100644 .github/workflows/build-ton-linux-x86-64-shared.yml create mode 100644 .github/workflows/build-ton-macos-x86-64-shared.yml create mode 100644 .github/workflows/build-ton-wasm-emscripten.yml delete mode 100644 .github/workflows/macos-11.7-compile.yml delete mode 100644 .github/workflows/macos-12.6-compile.yml delete mode 100644 .github/workflows/ton-aarch64-linux.yml delete mode 100644 .github/workflows/ton-aarch64-macos.yml delete mode 100644 .github/workflows/ton-wasm-emscripten.yml create mode 100644 .github/workflows/ton-x86-64-windows.yml delete mode 100644 .github/workflows/tonlib-android-jni.yml delete mode 100644 .github/workflows/ubuntu-22.04-compile.yml delete mode 100644 .github/workflows/ubuntu-compile.yml delete mode 100644 .github/workflows/win-2019-compile.yml create mode 100644 assembly/android/build-android-tonlib.sh create mode 100644 assembly/cicd/jenkins/test-builds.groovy create mode 100644 assembly/native/build-macos-portable.sh create mode 100644 assembly/native/build-macos-shared.sh create mode 100644 assembly/native/build-ubuntu-portable.sh create mode 100644 assembly/native/build-ubuntu-shared.sh create mode 100644 assembly/native/build-windows-github.bat create mode 100644 assembly/native/build-windows.bat create mode 100644 assembly/nix/build-linux-arm64-nix.sh create mode 100644 assembly/nix/build-linux-x86-64-nix.sh create mode 100644 assembly/nix/build-macos-nix.sh rename flake.lock => assembly/nix/flakes/flake.lock (100%) rename flake.nix => assembly/nix/flakes/flake.nix (100%) rename shell.nix => assembly/nix/flakes/shell.nix (100%) create mode 100644 assembly/nix/linux-arm64-static.nix create mode 100644 assembly/nix/linux-arm64-tonlib.nix create mode 100644 assembly/nix/linux-x86-64-static.nix create mode 100644 assembly/nix/linux-x86-64-tonlib.nix create mode 100644 assembly/nix/macos-static.nix create mode 100644 
assembly/nix/macos-tonlib.nix create mode 100644 assembly/nix/microhttpd.nix create mode 100644 assembly/nix/openssl.nix rename {.github/script => assembly/wasm}/fift-func-wasm-build-ubuntu.sh (71%) mode change 100755 => 100644 diff --git a/.github/workflows/build-ton-linux-android-tonlib.yml b/.github/workflows/build-ton-linux-android-tonlib.yml new file mode 100644 index 00000000..ae1be22f --- /dev/null +++ b/.github/workflows/build-ton-linux-android-tonlib.yml @@ -0,0 +1,32 @@ +name: Tonlib Android + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + runs-on: ubuntu-22.04 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build automake libtool texinfo autoconf libgflags-dev \ + zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev \ + libtool autoconf libsodium-dev libsecp256k1-dev + + - name: Build TON + run: | + cp assembly/android/build-android-tonlib.sh . + chmod +x build-android-tonlib.sh + ./build-android-tonlib.sh -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: tonlib-android + path: artifacts \ No newline at end of file diff --git a/.github/workflows/build-ton-linux-x86-64-shared.yml b/.github/workflows/build-ton-linux-x86-64-shared.yml new file mode 100644 index 00000000..166de660 --- /dev/null +++ b/.github/workflows/build-ton-linux-x86-64-shared.yml @@ -0,0 +1,40 @@ +name: Ubuntu TON build (shared, x86-64) + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04, ubuntu-22.04] + runs-on: ${{ matrix.os }} + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + - name: Install clang-16 + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all + + - name: Build TON + run: | + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh -t -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-binaries-${{ matrix.os }} + path: artifacts diff --git a/.github/workflows/build-ton-macos-x86-64-shared.yml b/.github/workflows/build-ton-macos-x86-64-shared.yml new file mode 100644 index 00000000..c9331e3b --- /dev/null +++ b/.github/workflows/build-ton-macos-x86-64-shared.yml @@ -0,0 +1,25 @@ +name: MacOS TON build (shared, x86-64) + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + runs-on: macos-12 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Build TON + run: | + cp assembly/native/build-macos-shared.sh . 
+ chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-binaries-macos-12 + path: artifacts diff --git a/.github/workflows/build-ton-wasm-emscripten.yml b/.github/workflows/build-ton-wasm-emscripten.yml new file mode 100644 index 00000000..16156b07 --- /dev/null +++ b/.github/workflows/build-ton-wasm-emscripten.yml @@ -0,0 +1,30 @@ +name: Emscripten TON build (wasm) + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + runs-on: ubuntu-22.04 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y build-essential git openssl cmake ninja-build zlib1g-dev libssl-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + - name: Build TON WASM artifacts + run: | + cd assembly/wasm + chmod +x fift-func-wasm-build-ubuntu.sh + ./fift-func-wasm-build-ubuntu.sh -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-wasm-binaries + path: artifacts \ No newline at end of file diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml index 10c20ac0..3d248cfe 100644 --- a/.github/workflows/create-release.yml +++ b/.github/workflows/create-release.yml @@ -46,7 +46,7 @@ jobs: - name: Download Windows artifacts uses: dawidd6/action-download-artifact@v2 with: - workflow: win-2019-compile.yml + workflow: ton-x86-64-windows.yml path: artifacts workflow_conclusion: success skip_unpack: true @@ -54,7 +54,7 @@ jobs: - name: Download and unzip Windows artifacts uses: dawidd6/action-download-artifact@v2 with: - workflow: win-2019-compile.yml + workflow: ton-x86-64-windows.yml path: artifacts workflow_conclusion: success skip_unpack: false @@ -62,7 +62,7 @@ jobs: - name: Download WASM artifacts uses: dawidd6/action-download-artifact@v2 with: - workflow: ton-wasm-emscripten.yml + workflow: build-ton-wasm-emscripten.yml path: artifacts workflow_conclusion: success skip_unpack: true diff --git a/.github/workflows/macos-11.7-compile.yml b/.github/workflows/macos-11.7-compile.yml deleted file mode 100644 index eb12db1b..00000000 --- a/.github/workflows/macos-11.7-compile.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: MacOS 11.7 Big Sur x86-64 Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: macos-11 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Compile Secp256k1 - run: | - export NONINTERACTIVE=1 - brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool - git clone https://github.com/libbitcoin/secp256k1.git - cd secp256k1 - ./autogen.sh - ./configure --enable-module-recovery - make - make install - - - name: Build all - run: | - brew unlink openssl@1.1 - brew install openssl@3 - brew unlink openssl@3 && brew link --overwrite openssl@3 - rootPath=`pwd` - mkdir build - cd build - cmake -GNinja -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. 
- - ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine \ - lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ - http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ - test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ - test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - - - name: Strip binaries - run: | - strip build/storage/storage-daemon/storage-daemon - strip build/storage/storage-daemon/storage-daemon-cli - strip build/blockchain-explorer/blockchain-explorer - strip build/crypto/fift - strip build/crypto/func - strip build/crypto/create-state - strip build/crypto/tlbc - strip build/validator-engine-console/validator-engine-console - strip build/tonlib/tonlib-cli - strip build/http/http-proxy - strip build/rldp-http-proxy/rldp-http-proxy - strip build/dht-server/dht-server - strip build/lite-client/lite-client - strip build/validator-engine/validator-engine - strip build/utils/generate-random-id - strip build/utils/json2tlo - strip build/adnl/adnl-proxy - - - name: Run tests - run: | - cd build - ctest --output-on-failure -E "test-catchain|test-actors" - - - name: Find & copy binaries - run: | - mkdir artifacts - cp build/storage/storage-daemon/storage-daemon artifacts/ - cp build/storage/storage-daemon/storage-daemon-cli artifacts/ - cp build/blockchain-explorer/blockchain-explorer artifacts/ - cp build/crypto/fift artifacts/ - cp build/crypto/func artifacts/ - cp build/crypto/create-state artifacts/ - cp build/crypto/tlbc artifacts/ - cp build/validator-engine-console/validator-engine-console artifacts/ - cp build/tonlib/tonlib-cli artifacts/ - cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib - cp build/http/http-proxy artifacts/ - cp build/rldp-http-proxy/rldp-http-proxy artifacts/ - cp build/dht-server/dht-server artifacts/ - cp build/lite-client/lite-client artifacts/ - cp build/validator-engine/validator-engine artifacts/ - cp build/utils/generate-random-id artifacts/ - cp build/utils/json2tlo artifacts/ - cp build/adnl/adnl-proxy artifacts/ - cp build/emulator/*emulator.* artifacts/ - chmod +x artifacts/* - rsync -r crypto/smartcont artifacts/ - rsync -r crypto/fift/lib artifacts/ - ls -laRt artifacts - - - name: Simple binaries test - run: | - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-macos-11.7 - path: artifacts diff --git a/.github/workflows/macos-12.6-compile.yml b/.github/workflows/macos-12.6-compile.yml deleted file mode 100644 index f41efc66..00000000 --- a/.github/workflows/macos-12.6-compile.yml +++ /dev/null @@ -1,106 +0,0 @@ -name: MacOS 12.6 Monterey x86-64 Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: macos-12 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Compile Secp256k1 - run: | - export NONINTERACTIVE=1 - brew install ninja secp256k1 libsodium libmicrohttpd pkg-config automake libtool - git clone https://github.com/libbitcoin/secp256k1.git - cd secp256k1 - ./autogen.sh - ./configure --enable-module-recovery - make - make install - - - name: Build all - run: | - brew unlink openssl@1.1 - brew install openssl@3 - 
brew unlink openssl@3 && brew link --overwrite openssl@3 - rootPath=`pwd` - mkdir build - cd build - cmake -GNinja -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. - - ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli \ - validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ - http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ - test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ - test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - - - name: Strip binaries - run: | - strip build/storage/storage-daemon/storage-daemon - strip build/storage/storage-daemon/storage-daemon-cli - strip build/blockchain-explorer/blockchain-explorer - strip build/crypto/fift - strip build/crypto/func - strip build/crypto/create-state - strip build/crypto/tlbc - strip build/validator-engine-console/validator-engine-console - strip build/tonlib/tonlib-cli - strip build/http/http-proxy - strip build/rldp-http-proxy/rldp-http-proxy - strip build/dht-server/dht-server - strip build/lite-client/lite-client - strip build/validator-engine/validator-engine - strip build/utils/generate-random-id - strip build/utils/json2tlo - strip build/adnl/adnl-proxy - - - name: Run tests - run: | - cd build - ctest --output-on-failure -E "test-catchain|test-actors" - - - name: Find & copy binaries - run: | - mkdir artifacts - cp build/storage/storage-daemon/storage-daemon artifacts/ - cp build/storage/storage-daemon/storage-daemon-cli artifacts/ - cp build/blockchain-explorer/blockchain-explorer artifacts/ - cp build/crypto/fift artifacts/ - cp build/crypto/func artifacts/ - cp build/crypto/create-state artifacts/ - cp build/crypto/tlbc artifacts/ - cp build/validator-engine-console/validator-engine-console artifacts/ - cp build/tonlib/tonlib-cli artifacts/ - cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib - cp build/http/http-proxy artifacts/ - cp build/rldp-http-proxy/rldp-http-proxy artifacts/ - cp build/dht-server/dht-server artifacts/ - cp build/lite-client/lite-client artifacts/ - cp build/validator-engine/validator-engine artifacts/ - cp build/utils/generate-random-id artifacts/ - cp build/utils/json2tlo artifacts/ - cp build/adnl/adnl-proxy artifacts/ - cp build/emulator/*emulator.* artifacts/ - chmod +x artifacts/* - rsync -r crypto/smartcont artifacts/ - rsync -r crypto/fift/lib artifacts/ - ls -laRt artifacts - - - name: Simple binaries test - run: | - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-macos-12.6 - path: artifacts diff --git a/.github/workflows/ton-aarch64-linux.yml b/.github/workflows/ton-aarch64-linux.yml deleted file mode 100644 index 3c600bee..00000000 --- a/.github/workflows/ton-aarch64-linux.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: "TON aarch64 Linux binaries" - -on: [workflow_dispatch,workflow_call] - -jobs: - build: - runs-on: ubuntu-22.04 - - steps: - - run: | - sudo apt update - sudo apt install -y apt-utils - sudo apt install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static - - - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - uses: cachix/install-nix-action@v18 - with: - 
extra_nix_config: | - access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - - name: Compile - run: nix build .?submodules=1#packages.aarch64-linux.ton-oldglibc_staticbinaries --print-build-logs --system aarch64-linux -o result-aarch64 - - - name: Copy binaries - run: | - ls -lart - mkdir artifacts - cp $PWD/result-aarch64-linux/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-aarch64-linux/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so - cp $PWD/result-aarch64-linux/lib/libemulator.so artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ - - - name: Simple binaries test - run: | - sudo mv /nix/store /nix/store2 - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-aarch64-linux-binaries - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ton-aarch64-macos.yml b/.github/workflows/ton-aarch64-macos.yml deleted file mode 100644 index 75fcec78..00000000 --- a/.github/workflows/ton-aarch64-macos.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: "TON aarch64 macOS binaries" - -on: [workflow_dispatch,workflow_call] - -jobs: - build: - runs-on: macos-12 - - steps: - - run: brew install qemu - - - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - uses: cachix/install-nix-action@v18 - with: - extra_nix_config: | - access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - - name: Compile - run: nix build .?submodules=1#packages.aarch64-darwin.ton-staticbin-dylib --print-build-logs -o result-aarch64-darwin - - - name: Copy binaries - run: | - ls -lart - mkdir artifacts - cp $PWD/result-aarch64-darwin/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-aarch64-darwin/lib/libtonlibjson* artifacts/ - cp $PWD/result-aarch64-darwin/lib/libemulator* artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ - - - name: Simple binaries test - run: | - sudo mv /nix/store /nix/store2 - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-aarch64-macos-binaries - path: artifacts diff --git a/.github/workflows/ton-ccpcheck.yml b/.github/workflows/ton-ccpcheck.yml index 8e9d6ad2..d2d8cf70 100644 --- a/.github/workflows/ton-ccpcheck.yml +++ b/.github/workflows/ton-ccpcheck.yml @@ -1,10 +1,9 @@ -name: TON Ccpcheck +name: TON Static Code Analysis on: [push,workflow_dispatch,workflow_call] jobs: build: - runs-on: ubuntu-22.04 steps: diff --git a/.github/workflows/ton-wasm-emscripten.yml b/.github/workflows/ton-wasm-emscripten.yml deleted file mode 100644 index a3167800..00000000 --- a/.github/workflows/ton-wasm-emscripten.yml +++ /dev/null @@ -1,46 +0,0 @@ -name: TON WASM Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - runs-on: ubuntu-22.04 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libsecp256k1-dev libsodium-dev automake libtool - - - name: Setup compiler - run: | - wget https://apt.llvm.org/llvm.sh - chmod +x llvm.sh - sudo ./llvm.sh 16 all - - - name: Configure & Build - run: | - cd .github/script - 
./fift-func-wasm-build-ubuntu.sh - - - name: Find & copy binaries - run: | - mkdir artifacts - ls build/crypto - cp build/crypto/fift* artifacts - cp build/crypto/func* artifacts - cp build/crypto/tlbc* artifacts - cp build/emulator/emulator-emscripten* artifacts - cp -R crypto/smartcont artifacts - cp -R crypto/fift/lib artifacts - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-wasm-binaries - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ton-x86-64-linux.yml b/.github/workflows/ton-x86-64-linux.yml index a4760dc4..fdd91000 100644 --- a/.github/workflows/ton-x86-64-linux.yml +++ b/.github/workflows/ton-x86-64-linux.yml @@ -1,4 +1,4 @@ -name: "TON x86_64 Linux binaries" +name: Ubuntu TON build (portable, x86-64) on: [push,workflow_dispatch,workflow_call] @@ -20,19 +20,11 @@ jobs: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - name: Compile - run: nix build .?submodules=1#packages.x86_64-linux.ton-oldglibc_staticbinaries --print-build-logs --system x86_64-linux -o result-x86_64 - - - name: Copy binaries + - name: Build TON run: | - ls -lart - mkdir artifacts - cp $PWD/result-x86_64/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-x86_64/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so - cp $PWD/result-x86_64/lib/libemulator.so artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ + cp assembly/nix/build-linux-x86-64-nix.sh . + chmod +x build-linux-x86-64-nix.sh + ./build-linux-x86-64-nix.sh - name: Simple binaries test run: | diff --git a/.github/workflows/ton-x86-64-macos.yml b/.github/workflows/ton-x86-64-macos.yml index cea2937a..c0f90718 100644 --- a/.github/workflows/ton-x86-64-macos.yml +++ b/.github/workflows/ton-x86-64-macos.yml @@ -1,4 +1,4 @@ -name: "TON x86_64 macOS binaries" +name: MacOS TON build (portable, x86-64) on: [push,workflow_dispatch,workflow_call] @@ -16,19 +16,11 @@ jobs: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - name: Compile - run: nix build .?submodules=1#packages.x86_64-darwin.ton-staticbin-dylib --print-build-logs -o result-x86_64-darwin - - - name: Copy binaries + - name: Build TON run: | - ls -lart - mkdir artifacts - cp $PWD/result-x86_64-darwin/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-x86_64-darwin/lib/libtonlibjson.dylib artifacts/ - cp $PWD/result-x86_64-darwin/lib/libemulator.dylib artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ + cp assembly/nix/build-macos-nix.sh . + chmod +x build-macos-nix.sh + ./build-macos-nix.sh - name: Simple binaries test run: | diff --git a/.github/workflows/ton-x86-64-windows.yml b/.github/workflows/ton-x86-64-windows.yml new file mode 100644 index 00000000..67026183 --- /dev/null +++ b/.github/workflows/ton-x86-64-windows.yml @@ -0,0 +1,34 @@ +name: Windows TON build (portable, x86-64) + +on: [push,workflow_dispatch,workflow_call] + +defaults: + run: + shell: cmd + +jobs: + build: + + runs-on: windows-2022 + + steps: + - name: Get Current OS version + run: | + systeminfo | findstr /B /C:"OS Name" /C:"OS Version" + + - name: Check out current repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Build TON + run: | + copy assembly\native\build-windows-github.bat . + copy assembly\native\build-windows.bat . 
+ build-windows-github.bat Enterprise + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-win-binaries + path: artifacts diff --git a/.github/workflows/tonlib-android-jni.yml b/.github/workflows/tonlib-android-jni.yml deleted file mode 100644 index 6e04f8b7..00000000 --- a/.github/workflows/tonlib-android-jni.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Tonlib Android JNI - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: ubuntu-22.04 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build - - - name: Configure & Build - run: | - wget -q https://dl.google.com/android/repository/android-ndk-r25b-linux.zip - unzip -q android-ndk-r25b-linux.zip - export JAVA_AWT_LIBRARY=NotNeeded - export JAVA_JVM_LIBRARY=NotNeeded - export JAVA_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux - - export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b - export NDK_PLATFORM="android-21" - export ANDROID_PLATFORM="android-21" - export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto - - rm -rf example/android/src/drinkless/org/ton/TonApi.java - cd example/android/ - - sudo apt install -y libtool autoconf libsodium-dev libsecp256k1-dev - - cmake -GNinja -DTON_ONLY_TONLIB=ON . - - ninja prepare_cross_compiling - - sudo apt remove -y libsodium-dev libsecp256k1-dev - - rm CMakeCache.txt - ./build-all.sh - find . -name "*.debug" -type f -delete - - - name: Find & copy binaries - run: | - mkdir -p artifacts/tonlib-android-jni - cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-android-jni/ - cp -R example/android/libs/* artifacts/tonlib-android-jni/ - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: tonlib-android-jni - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ubuntu-22.04-compile.yml b/.github/workflows/ubuntu-22.04-compile.yml deleted file mode 100644 index af8943a1..00000000 --- a/.github/workflows/ubuntu-22.04-compile.yml +++ /dev/null @@ -1,78 +0,0 @@ -name: Ubuntu 22.04 Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: ubuntu-22.04 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build libsecp256k1-dev libsodium-dev - - - name: Show CPU flags - run: | - cat /proc/cpuinfo - - - name: Configure & Build - run: | - export CC=$(which clang) - export CXX=$(which clang++) - export CCACHE_DISABLE=1 - - git clone https://github.com/openssl/openssl openssl_3 - cd openssl_3 - git checkout openssl-3.1.4 - ./config - make build_libs -j4 - - cd .. - rootPath=`pwd` - mkdir build - cd build - - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_3/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_3/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. 
- - ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client \ - pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ - adnl-proxy create-state emulator \ - test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ - test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - - - name: Strip binaries - run: | - strip -g build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* - - - name: Run tests - run: | - cd build - ctest --output-on-failure -E "test-catchain|test-actors" - - - name: Find & copy binaries - run: | - mkdir artifacts - cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* artifacts - chmod +x artifacts/* - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ - - - name: Simple binaries test - run: | - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-ubuntu-binaries - path: artifacts diff --git a/.github/workflows/ubuntu-compile.yml b/.github/workflows/ubuntu-compile.yml deleted file mode 100644 index 3c1e7bad..00000000 --- a/.github/workflows/ubuntu-compile.yml +++ /dev/null @@ -1,81 +0,0 @@ -name: Ubuntu Compile x86-64 - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - strategy: - fail-fast: false - matrix: - os: [ubuntu-20.04, ubuntu-22.04] - runs-on: ${{ matrix.os }} - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build libsecp256k1-dev libsodium-dev - - - name: Show CPU flags - run: | - cat /proc/cpuinfo - - - name: Configure & Build - run: | - export CC=$(which clang) - export CXX=$(which clang++) - export CCACHE_DISABLE=1 - - mkdir build-${{ matrix.os }} - cd build-${{ matrix.os }} - - git clone https://github.com/openssl/openssl openssl_3 - cd openssl_3 - git checkout openssl-3.1.4 - ./config - make build_libs -j4 - - cd .. 
- rootPath=`pwd` - - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_3/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_3/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. - ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client \ - pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy \ - create-state create-hardfork emulator \ - test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor \ - test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - - - name: Strip binaries - run: | - strip -g build-${{ matrix.os }}/storage/storage-daemon/storage-daemon build-${{ matrix.os }}/storage/storage-daemon/storage-daemon-cli build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy build-${{ matrix.os }}/emulator/libemulator.* - - - name: Run tests - run: | - cd build-${{ matrix.os }} - ctest --output-on-failure -E "test-catchain|test-actors" - - - name: Find & copy binaries - run: | - mkdir artifacts-${{ matrix.os }} - cp build-${{ matrix.os }}/storage/storage-daemon/storage-daemon build-${{ matrix.os }}/storage/storage-daemon/storage-daemon-cli build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy build-${{ matrix.os }}/emulator/libemulator.* artifacts-${{ matrix.os }} - chmod +x artifacts-${{ matrix.os }}/* - cp -R crypto/smartcont artifacts-${{ matrix.os }} - cp -R crypto/fift/lib artifacts-${{ matrix.os }} - - - name: Simple binaries test - run: | - artifacts-${{ matrix.os }}/validator-engine -V - artifacts-${{ matrix.os }}/lite-client -V - artifacts-${{ matrix.os }}/fift -V - artifacts-${{ matrix.os }}/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-binaries-${{ matrix.os }} - path: artifacts-${{ matrix.os }} diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml deleted file mode 100644 index e94655f7..00000000 --- a/.github/workflows/win-2019-compile.yml +++ /dev/null @@ -1,108 +0,0 @@ -name: Windows Server 2019 x64 Compile - -on: [push,workflow_dispatch,workflow_call] - -defaults: - run: - 
shell: cmd - -jobs: - build: - - runs-on: windows-2019 - - steps: - - name: Get Current OS version - run: | - systeminfo | findstr /B /C:"OS Name" /C:"OS Version" - - - name: Check out current repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Check out zlib repository - uses: actions/checkout@v3 - with: - repository: desktop-app/zlib - path: zlib - - - name: Setup msbuild.exe - uses: microsoft/setup-msbuild@v1.1 - - - name: Install Pkg-config Lite - run: choco install pkgconfiglite - - - name: Compile zlib Win64 - run: | - cd zlib\contrib\vstudio\vc14 - msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142 - - - name: Compile secp256k1 Win64 - run: | - git clone https://github.com/libbitcoin/secp256k1.git - cd secp256k1\builds\msvc\vs2017 - msbuild /p:Configuration=StaticRelease -p:PlatformToolset=v142 -p:Platform=x64 - - - name: Install pre-compiled libsodium Win64 - run: | - curl -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip - unzip libsodium-1.0.18-stable-msvc.zip - - - name: Install pre-compiled OpenSSL 3 Win64 - run: | - curl -Lo openssl-3.1.4.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-3.1.4.zip - unzip openssl-3.1.4.zip - - - name: Install pre-compiled libmicrohttpd Win64 - run: | - curl -Lo libmicrohttpd-0.9.77-w32-bin.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip - unzip libmicrohttpd-0.9.77-w32-bin.zip - - - name: Install pre-compiled Readline Win64 - run: | - curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip - unzip readline-5.0-1-lib.zip - - - name: Compile - run: | - set root=%cd% - set SODIUM_DIR=%root%\libsodium - echo %root% - echo %SODIUM_DIR% - mkdir build - cd build - cmake -DSODIUM_USE_STATIC_LIBS=1 -DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include -DSECP256K1_LIBRARY=%root%\secp256k1\bin\x64\Release\v142\static\secp256k1.lib -DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include\readline -DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-3.1.4/x64/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-3.1.4/x64/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. - cmake --build . 
--config Release --target storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state - - - name: Run tests - run: | - cd build - ctest -C Release --output-on-failure -E "test-catchain|test-actors|test-validator-session-state" - - - name: Show executables - run: | - cd build - del Release\test-* - dir *.exe /a-D /S /B - dir *.dll /a-D /S /B - - - name: Check if validator-engine.exe exists - run: | - copy %cd%\build\validator-engine\Release\validator-engine.exe test - - - name: Find & copy binaries - run: | - mkdir artifacts - mkdir artifacts\smartcont - mkdir artifacts\lib - - for %%I in (build\storage\storage-daemon\Release\storage-daemon.exe build\storage\storage-daemon\Release\storage-daemon-cli.exe build\blockchain-explorer\blockchain-explorer.exe build\crypto\Release\fift.exe build\crypto\Release\tlbc.exe build\crypto\Release\func.exe build\crypto\Release\create-state.exe build\validator-engine-console\Release\validator-engine-console.exe build\tonlib\Release\tonlib-cli.exe build\tonlib\Release\tonlibjson.dll build\http\Release\http-proxy.exe build\rldp-http-proxy\Release\rldp-http-proxy.exe build\dht-server\Release\dht-server.exe build\lite-client\Release\lite-client.exe build\validator-engine\Release\validator-engine.exe build\utils\Release\generate-random-id.exe build\utils\Release\json2tlo.exe build\adnl\Release\adnl-proxy.exe build\emulator\Release\emulator.dll) do (strip -g %%I & copy %%I artifacts\) - xcopy /e /k /h /i crypto\smartcont artifacts\smartcont - xcopy /e /k /h /i crypto\fift\lib artifacts\lib - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-win-binaries - path: artifacts diff --git a/.gitignore b/.gitignore index 54d9ffc7..536918ab 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,14 @@ test/regression-tests.cache/ *.swp **/*build*/ .idea -.vscode \ No newline at end of file +.vscode +zlib/ +libsodium/ +libmicrohttpd-0.9.77-w32-bin/ +readline-5.0-1-lib/ +secp256k1/ +openssl-3.1.4/ +libsodium-1.0.18-stable-msvc.zip +libmicrohttpd-0.9.77-w32-bin.zip +openssl-3.1.4.zip +readline-5.0-1-lib.zip diff --git a/CMake/FindMHD.cmake b/CMake/FindMHD.cmake index c4b94c0e..7d6dd5fd 100644 --- a/CMake/FindMHD.cmake +++ b/CMake/FindMHD.cmake @@ -2,23 +2,26 @@ # Once done this will define # # MHD_FOUND - system has MHD -# MHD_INCLUDE_DIRS - the MHD include directory +# MHD_INCLUDE_DIR - the MHD include directory # MHD_LIBRARY - Link these to use MHD -find_path( - MHD_INCLUDE_DIR - NAMES microhttpd.h - DOC "microhttpd include dir" -) +if (NOT MHD_LIBRARY) + find_path( + MHD_INCLUDE_DIR + NAMES microhttpd.h + DOC "microhttpd include dir" + ) -find_library( - MHD_LIBRARY - NAMES microhttpd microhttpd-10 libmicrohttpd libmicrohttpd-dll - DOC "microhttpd library" -) + find_library( + MHD_LIBRARY + NAMES microhttpd microhttpd-10 libmicrohttpd libmicrohttpd-dll + DOC "microhttpd library" + ) +endif() -set(MHD_INCLUDE_DIRS ${MHD_INCLUDE_DIR}) -set(MHD_LIBRARIES ${MHD_LIBRARY}) +if (MHD_LIBRARY) + message(STATUS "Found MHD: ${MHD_LIBRARY}") +endif() include(FindPackageHandleStandardArgs) 
find_package_handle_standard_args(MHD DEFAULT_MSG MHD_INCLUDE_DIR MHD_LIBRARY) diff --git a/CMake/FindSecp256k1.cmake b/CMake/FindSecp256k1.cmake index 11603f15..68a37c71 100644 --- a/CMake/FindSecp256k1.cmake +++ b/CMake/FindSecp256k1.cmake @@ -2,28 +2,27 @@ # Once done this will define # # SECP256K1_FOUND - system has SECP256K1 -# SECP256K1_INCLUDE_DIRS - the SECP256K1 include directory +# SECP256K1_INCLUDE_DIR - the SECP256K1 include directory # SECP256K1_LIBRARY - Link these to use SECP256K1 -find_path( - SECP256K1_INCLUDE_DIR - NAMES secp256k1_recovery.h - DOC "secp256k1_recovery.h include dir" -) +if (NOT SECP256K1_LIBRARY) + find_path( + SECP256K1_INCLUDE_DIR + NAMES secp256k1_recovery.h + DOC "secp256k1_recovery.h include dir" + ) -find_library( - SECP256K1_LIBRARY - NAMES secp256k1 libsecp256k1 - DOC "secp256k1 library" -) + find_library( + SECP256K1_LIBRARY + NAMES secp256k1 libsecp256k1 + DOC "secp256k1 library" + ) +endif() if (SECP256K1_LIBRARY) message(STATUS "Found Secp256k1: ${SECP256K1_LIBRARY}") endif() -set(SECP256K1_INCLUDE_DIRS ${SECP256K1_INCLUDE_DIR}) -set(SECP256K1_LIBRARIES ${SECP256K1_LIBRARY}) - include(FindPackageHandleStandardArgs) find_package_handle_standard_args(Secp256k1 DEFAULT_MSG SECP256K1_INCLUDE_DIR SECP256K1_LIBRARY) mark_as_advanced(SECP256K1_INCLUDE_DIR SECP256K1_LIBRARY) diff --git a/CMake/FindSodium.cmake b/CMake/FindSodium.cmake index 0053ac54..85194ee2 100644 --- a/CMake/FindSodium.cmake +++ b/CMake/FindSodium.cmake @@ -26,6 +26,7 @@ # Furthermore an imported "sodium" target is created. # + if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_C_COMPILER_ID STREQUAL "Clang") set(_GCC_COMPATIBLE 1) diff --git a/CMakeLists.txt b/CMakeLists.txt index c6d7ed87..89be3238 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -211,7 +211,13 @@ set(CMAKE_THREAD_PREFER_PTHREAD ON) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) find_package(PkgConfig REQUIRED) -find_package(ZLIB REQUIRED) + +if (NOT ZLIB_FOUND) + find_package(ZLIB REQUIRED) +else() + message(STATUS "Using zlib ${ZLIB_LIBRARIES}") +endif() + if (TON_ARCH AND NOT MSVC) CHECK_CXX_COMPILER_FLAG( "-march=${TON_ARCH}" COMPILER_OPT_ARCH_SUPPORTED ) @@ -371,6 +377,9 @@ if (LATEX_FOUND) add_latex_document(doc/fiftbase.tex TARGET_NAME fift_basic_description) add_latex_document(doc/catchain.tex TARGET_NAME catchain_consensus_description) endif() +if (NOT LATEX_FOUND) + message(STATUS "Could NOT find LATEX (this is NOT an error)") +endif() #END internal function(target_link_libraries_system target) @@ -580,8 +589,8 @@ if (NOT NIX) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/crypto/func/auto-tests) if (WIN32) set_property(TEST test-func PROPERTY ENVIRONMENT - "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/func.exe" - "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/fift.exe" + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func.exe" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift.exe" "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") else() set_property(TEST test-func PROPERTY ENVIRONMENT @@ -596,8 +605,8 @@ if (NOT NIX) WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/crypto/func/auto-tests) if (WIN32) set_property(TEST test-func-legacy PROPERTY ENVIRONMENT - "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/func.exe" - "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/Release/fift.exe" + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func.exe" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift.exe" 
"FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") else() set_property(TEST test-func-legacy PROPERTY ENVIRONMENT diff --git a/README.md b/README.md index 7e78bb04..653f2f83 100644 --- a/README.md +++ b/README.md @@ -43,7 +43,7 @@ __The Open Network (TON)__ is a fast, secure, scalable blockchain focused on han - To work on TON check [wallets](https://ton.app/wallets), [explorers](https://ton.app/explorers), [DEXes](https://ton.app/dex) and [utilities](https://ton.app/utilities) - To interact with TON check [APIs](https://ton.org/docs/develop/dapps/apis/) -## Updates flow: +## Updates flow * **master branch** - mainnet is running on this stable branch. @@ -61,12 +61,90 @@ Usually, the response to your pull request will indicate which section it falls * Thou shall not merge your own PRs, at least one person should review the PR and merge it (4-eyes rule) * Thou shall make sure that workflows are cleanly completed for your PR before considering merge -## Workflows responsibility -If a CI workflow fails not because of your changes but workflow issues, try to fix it yourself or contact one of the persons listed below via Telegram messenger: +## Build TON blockchain -* **C/C++ CI (ccpp-linux.yml)**: TBD -* **C/C++ CI Win64 Compile (ccpp-win64.yml)**: TBD +### Ubuntu 20.4, 22.04 (x86-64, aarch64) +Install additional system libraries +```bash + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all +``` +Compile TON binaries +```bash + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh +``` +### MacOS 11, 12 (x86-64, aarch64) +```bash + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh +``` + +### Windows 10, 11, Server (x86-64) +You need to install `MS Visual Studio 2022` first. +Go to https://www.visualstudio.com/downloads/ and download `MS Visual Studio 2022 Community`. + +Launch installer and select `Desktop development with C++`. +After installation, also make sure that `cmake` is globally available by adding +`C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin` to the system `PATH` (adjust the path per your needs). + +Open an elevated (Run as Administrator) `x86-64 Native Tools Command Prompt for VS 2022`, go to the root folder and execute: +```bash + copy assembly\native\build-windows.bat . 
+ build-windows.bat +``` + +### Building TON to WebAssembly +Install additional system libraries on Ubuntu +```bash + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all +``` +Compile TON binaries with emscripten +```bash + cd assembly/wasm + chmod +x fift-func-wasm-build-ubuntu.sh + ./fift-func-wasm-build-ubuntu.sh +``` + +### Building TON tonlib library for Android (arm64-v8a, armeabi-v7a, x86, x86-64) +Install additional system libraries on Ubuntu +```bash + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build automake libtool texinfo autoconf libgflags-dev \ + zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev \ + libtool autoconf libsodium-dev libsecp256k1-dev +``` +Compile TON tonlib library +```bash + cp assembly/android/build-android-tonlib.sh . + chmod +x build-android-tonlib.sh + ./build-android-tonlib.sh +``` + +### Build TON portable binaries with Nix package manager +You need to install Nix first. +```bash + sh <(curl -L https://nixos.org/nix/install) --daemon +``` +Then compile TON with Nix by executing below command from the root folder: +```bash + cp -r assembly/nix/* . + export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + nix-build linux-x86-64-static.nix +``` +More examples for other platforms can be found under `assembly/nix`. ## Running tests diff --git a/assembly/android/build-android-tonlib.sh b/assembly/android/build-android-tonlib.sh new file mode 100644 index 00000000..e470f602 --- /dev/null +++ b/assembly/android/build-android-tonlib.sh @@ -0,0 +1,55 @@ +with_artifacts=false + +while getopts 'a' flag; do + case "${flag}" in + a) with_artifacts=true ;; + *) break + ;; + esac +done + +if [ ! -d android-ndk-r25b ]; then + rm android-ndk-r25b-linux.zip + wget -q https://dl.google.com/android/repository/android-ndk-r25b-linux.zip + unzip -q android-ndk-r25b-linux.zip + test $? -eq 0 || { echo "Can't unzip android-ndk-r25b-linux.zip"; exit 1; } + echo Android NDK extracted +else + echo Using extracted Android NDK +fi + +export JAVA_AWT_LIBRARY=NotNeeded +export JAVA_JVM_LIBRARY=NotNeeded +export JAVA_INCLUDE_PATH=${JAVA_HOME}/include +export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include +export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux + +export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b +export NDK_PLATFORM="android-21" +export ANDROID_PLATFORM="android-21" +export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto + +rm -rf example/android/src/drinkless/org/ton/TonApi.java +cd example/android/ + +rm CMakeCache.txt .ninja_* +cmake -GNinja -DTON_ONLY_TONLIB=ON . + +test $? -eq 0 || { echo "Can't configure TON"; exit 1; } + +ninja prepare_cross_compiling + +test $? -eq 0 || { echo "Can't compile prepare_cross_compiling"; exit 1; } + +rm CMakeCache.txt .ninja_* + +. ./build-all.sh + +find . -name "*.debug" -type f -delete + +if [ "$with_artifacts" = true ]; then + cd ../.. 
+ mkdir -p artifacts/tonlib-android-jni + cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-android-jni/ + cp -R example/android/libs/* artifacts/tonlib-android-jni/ +fi diff --git a/assembly/cicd/jenkins/test-builds.groovy b/assembly/cicd/jenkins/test-builds.groovy new file mode 100644 index 00000000..380efedd --- /dev/null +++ b/assembly/cicd/jenkins/test-builds.groovy @@ -0,0 +1,236 @@ +pipeline { + agent none + stages { + stage('Run Builds') { + parallel { + stage('Ubuntu 20.04 x86-64 (shared)') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-x86_64-linux-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86_64-linux-shared.zip' + } + } + } + stage('Ubuntu 20.04 x86-64 (portable)') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-linux-x86-64-nix.sh . + chmod +x build-linux-x86-64-nix.sh + ./build-linux-x86-64-nix.sh + ''' + sh ''' + cd artifacts + zip -9r ton-x86-64-linux-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-linux-portable.zip' + } + } + } + stage('Ubuntu 20.04 aarch64 (shared)') { + agent { + label 'Ubuntu_arm64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-linux-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-linux-shared.zip' + } + } + } + stage('Ubuntu 20.04 aarch64 (portable)') { + agent { + label 'Ubuntu_arm64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-linux-arm64-nix.sh . + chmod +x build-linux-arm64-nix.sh + ./build-linux-arm64-nix.sh + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-linux-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-linux-portable.zip' + } + } + } + stage('macOS 12.7 x86-64 (shared)') { + agent { + label 'macOS_12.7_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-x86-64-macos-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-macos-shared.zip' + } + } + } + stage('macOS 12.7 x86-64 (portable)') { + agent { + label 'macOS_12.7_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-macos-nix.sh . + chmod +x build-macos-nix.sh + ./build-macos-nix.sh + ''' + sh ''' + cd artifacts + zip -9r ton-x86-64-macos-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-macos-portable.zip' + } + } + } + stage('macOS 12.6 aarch64 (shared)') { + agent { + label 'macOS_12.6-arm64-m1' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-macos-m1-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-macos-m1-shared.zip' + } + } + } + stage('macOS 12.6 aarch64 (portable)') { + agent { + label 'macOS_12.6-arm64-m1' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-macos-nix.sh . 
+ chmod +x build-macos-nix.sh + ./build-macos-nix.sh + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-macos-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-macos-portable.zip' + } + } + } + stage('macOS 13.2 aarch64 (shared)') { + agent { + label 'macOS_13.2-arm64-m2' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-macos-m2-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-macos-m2-shared.zip' + } + } + } + stage('Windows Server 2022 x86-64') { + agent { + label 'Windows_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + bat ''' + copy assembly\\native\\build-windows.bat . + build-windows.bat + ''' + bat ''' + cd artifacts + zip -9r ton-x86-64-windows ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-windows.zip' + } + } + } + stage('Android Tonlib') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/android/build-android-tonlib.sh . + chmod +x build-android-tonlib.sh + ./build-android-tonlib.sh -a + ''' + sh ''' + cd artifacts/tonlib-android-jni + zip -9r ton-android-tonlib ./* + ''' + archiveArtifacts artifacts: 'artifacts/tonlib-android-jni/ton-android-tonlib.zip' + } + } + } + stage('WASM fift func emulator') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cd assembly/wasm + chmod +x fift-func-wasm-build-ubuntu.sh + ./fift-func-wasm-build-ubuntu.sh -a + ''' + sh ''' + cd artifacts + zip -9r ton-wasm-binaries ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-wasm-binaries.zip' + } + } + } + } + } + } +} \ No newline at end of file diff --git a/assembly/native/build-macos-portable.sh b/assembly/native/build-macos-portable.sh new file mode 100644 index 00000000..a4187d4c --- /dev/null +++ b/assembly/native/build-macos-portable.sh @@ -0,0 +1,207 @@ +#/bin/bash + +with_tests=false +with_artifacts=false +OSX_TARGET=10.15 + + +while getopts 'tao:' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + o) OSX_TARGET=${OPTARG} ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export NONINTERACTIVE=1 +brew install ninja pkg-config automake libtool autoconf +brew install llvm@16 + + +if [ -f /opt/homebrew/opt/llvm@16/bin/clang ]; then + export CC=/opt/homebrew/opt/llvm@16/bin/clang + export CXX=/opt/homebrew/opt/llvm@16/bin/clang++ +else + export CC=/usr/local/opt/llvm@16/bin/clang + export CXX=/usr/local/opt/llvm@16/bin/clang++ +fi +export CCACHE_DISABLE=1 + +if [ ! -d "secp256k1" ]; then +git clone https://github.com/bitcoin-core/secp256k1.git +cd secp256k1 +secp256k1Path=`pwd` +git checkout v0.3.2 +./autogen.sh +./configure --enable-module-recovery --enable-static --disable-tests --disable-benchmark --with-pic +make -j12 +test $? -eq 0 || { echo "Can't compile secp256k1"; exit 1; } +cd .. +else + secp256k1Path=$(pwd)/secp256k1 + echo "Using compiled secp256k1" +fi + +if [ ! -d "libsodium" ]; then + export LIBSODIUM_FULL_BUILD=1 + git clone https://github.com/jedisct1/libsodium.git + cd libsodium + sodiumPath=`pwd` + git checkout 1.0.18 + ./autogen.sh + ./configure --with-pic --enable-static + make -j12 + test $? -eq 0 || { echo "Can't compile libsodium"; exit 1; } + cd .. 
+else + sodiumPath=$(pwd)/libsodium + echo "Using compiled libsodium" +fi + +if [ ! -d "openssl_3" ]; then + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + opensslPath=`pwd` + git checkout openssl-3.1.4 + ./config -static + make build_libs -j12 + test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; } + cd .. +else + opensslPath=$(pwd)/openssl_3 + echo "Using compiled openssl_3" +fi + +if [ ! -d "zlib" ]; then + git clone https://github.com/madler/zlib.git + cd zlib + zlibPath=`pwd` + ./configure --static + make -j12 + test $? -eq 0 || { echo "Can't compile zlib"; exit 1; } + cd .. +else + zlibPath=$(pwd)/zlib + echo "Using compiled zlib" +fi + +if [ ! -d "libmicrohttpd" ]; then + git clone https://git.gnunet.org/libmicrohttpd.git + cd libmicrohttpd + libmicrohttpdPath=`pwd` + ./autogen.sh + ./configure --enable-static --disable-tests --disable-benchmark --disable-shared --disable-https --with-pic + make -j12 + test $? -eq 0 || { echo "Can't compile libmicrohttpd"; exit 1; } + cd .. +else + libmicrohttpdPath=$(pwd)/libmicrohttpd + echo "Using compiled libmicrohttpd" +fi + +cmake -GNinja .. \ +-DPORTABLE=1 \ +-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=$OSX_TARGET \ +-DCMAKE_CXX_FLAGS="-stdlib=libc++" \ +-DCMAKE_BUILD_TYPE=Release \ +-DOPENSSL_FOUND=1 \ +-DOPENSSL_INCLUDE_DIR=$opensslPath/include \ +-DOPENSSL_CRYPTO_LIBRARY=$opensslPath/libcrypto.a \ +-DZLIB_FOUND=1 \ +-DZLIB_INCLUDE_DIR=$zlibPath \ +-DZLIB_LIBRARIES=$zlibPath/libz.a \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$secp256k1Path/include \ +-DSECP256K1_LIBRARY=$secp256k1Path/.libs/libsecp256k1.a \ +-DSODIUM_FOUND=1 \ +-DSODIUM_INCLUDE_DIR=$sodiumPath/src/libsodium/include \ +-DSODIUM_LIBRARY_RELEASE=$sodiumPath/src/libsodium/.libs/libsodium.a \ +-DMHD_FOUND=1 \ +-DMHD_INCLUDE_DIR=$libmicrohttpdPath/src/include \ +-DMHD_LIBRARY=$libmicrohttpdPath/src/microhttpd/.libs/libmicrohttpd.a + + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \ + test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \ + test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +fi + +strip storage/storage-daemon/storage-daemon +strip storage/storage-daemon/storage-daemon-cli +strip blockchain-explorer/blockchain-explorer +strip crypto/fift +strip crypto/func +strip crypto/create-state +strip crypto/tlbc +strip validator-engine-console/validator-engine-console +strip tonlib/tonlib-cli +strip http/http-proxy +strip rldp-http-proxy/rldp-http-proxy +strip dht-server/dht-server +strip lite-client/lite-client +strip validator-engine/validator-engine +strip utils/generate-random-id +strip utils/json2tlo +strip adnl/adnl-proxy + +cd .. 
+ +if [ "$with_artifacts" = true ]; then + echo Creating artifacts... + rm -rf artifacts + mkdir artifacts + cp crypto/fift/lib artifacts/ + cp -R crypto/smartcont/ artifacts/ + cp build/storage/storage-daemon/storage-daemon artifacts/ + cp build/storage/storage-daemon/storage-daemon-cli artifacts/ + cp build/blockchain-explorer/blockchain-explorer artifacts/ + cp build/crypto/fift artifacts/ + cp build/crypto/func artifacts/ + cp build/crypto/create-state artifacts/ + cp build/crypto/tlbc artifacts/ + cp build/validator-engine-console/validator-engine-console artifacts/ + cp build/tonlib/tonlib-cli artifacts/ + cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib + cp build/http/http-proxy artifacts/ + cp build/rldp-http-proxy/rldp-http-proxy artifacts/ + cp build/dht-server/dht-server artifacts/ + cp build/lite-client/lite-client artifacts/ + cp build/validator-engine/validator-engine artifacts/ + cp build/utils/generate-random-id artifacts/ + cp build/utils/json2tlo artifacts/ + cp build/adnl/adnl-proxy artifacts/ + cp build/emulator/libemulator.dylib artifacts/ + chmod +x artifacts/* + rsync -r crypto/smartcont artifacts/ + rsync -r crypto/fift/lib artifacts/ +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors" + ctest --output-on-failure +fi diff --git a/assembly/native/build-macos-shared.sh b/assembly/native/build-macos-shared.sh new file mode 100644 index 00000000..7b4f90ee --- /dev/null +++ b/assembly/native/build-macos-shared.sh @@ -0,0 +1,136 @@ +#/bin/bash + +with_tests=false +with_artifacts=false +OSX_TARGET=10.15 + + +while getopts 'tao:' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + o) OSX_TARGET=${OPTARG} ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export NONINTERACTIVE=1 +brew install ninja libsodium libmicrohttpd pkg-config automake libtool autoconf gnutls +brew install llvm@16 + +if [ -f /opt/homebrew/opt/llvm@16/bin/clang ]; then + export CC=/opt/homebrew/opt/llvm@16/bin/clang + export CXX=/opt/homebrew/opt/llvm@16/bin/clang++ +else + export CC=/usr/local/opt/llvm@16/bin/clang + export CXX=/usr/local/opt/llvm@16/bin/clang++ +fi +export CCACHE_DISABLE=1 + +if [ ! -d "secp256k1" ]; then + git clone https://github.com/bitcoin-core/secp256k1.git + cd secp256k1 + secp256k1Path=`pwd` + git checkout v0.3.2 + ./autogen.sh + ./configure --enable-module-recovery --enable-static --disable-tests --disable-benchmark + make -j12 + test $? -eq 0 || { echo "Can't compile secp256k1"; exit 1; } + cd .. +else + secp256k1Path=$(pwd)/secp256k1 + echo "Using compiled secp256k1" +fi + +brew unlink openssl@1.1 +brew install openssl@3 +brew unlink openssl@3 && brew link --overwrite openssl@3 + +cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. \ +-DCMAKE_CXX_FLAGS="-stdlib=libc++" \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$secp256k1Path/include \ +-DSECP256K1_LIBRARY=$secp256k1Path/.libs/libsecp256k1.a + +test $? 
-eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \ + test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \ + test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +fi + + +strip storage/storage-daemon/storage-daemon +strip storage/storage-daemon/storage-daemon-cli +strip blockchain-explorer/blockchain-explorer +strip crypto/fift +strip crypto/func +strip crypto/create-state +strip crypto/tlbc +strip validator-engine-console/validator-engine-console +strip tonlib/tonlib-cli +strip http/http-proxy +strip rldp-http-proxy/rldp-http-proxy +strip dht-server/dht-server +strip lite-client/lite-client +strip validator-engine/validator-engine +strip utils/generate-random-id +strip utils/json2tlo +strip adnl/adnl-proxy + +cd .. + +if [ "$with_artifacts" = true ]; then + echo Creating artifacts... + rm -rf artifacts + mkdir artifacts + cp build/storage/storage-daemon/storage-daemon artifacts/ + cp build/storage/storage-daemon/storage-daemon-cli artifacts/ + cp build/blockchain-explorer/blockchain-explorer artifacts/ + cp build/crypto/fift artifacts/ + cp build/crypto/func artifacts/ + cp build/crypto/create-state artifacts/ + cp build/crypto/tlbc artifacts/ + cp build/validator-engine-console/validator-engine-console artifacts/ + cp build/tonlib/tonlib-cli artifacts/ + cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib + cp build/http/http-proxy artifacts/ + cp build/rldp-http-proxy/rldp-http-proxy artifacts/ + cp build/dht-server/dht-server artifacts/ + cp build/lite-client/lite-client artifacts/ + cp build/validator-engine/validator-engine artifacts/ + cp build/utils/generate-random-id artifacts/ + cp build/utils/json2tlo artifacts/ + cp build/adnl/adnl-proxy artifacts/ + cp build/emulator/libemulator.dylib artifacts/ + chmod +x artifacts/* + rsync -r crypto/smartcont artifacts/ + rsync -r crypto/fift/lib artifacts/ +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors" + ctest --output-on-failure --timeout 1800 +fi diff --git a/assembly/native/build-ubuntu-portable.sh b/assembly/native/build-ubuntu-portable.sh new file mode 100644 index 00000000..81dbe710 --- /dev/null +++ b/assembly/native/build-ubuntu-portable.sh @@ -0,0 +1,198 @@ +#/bin/bash + +#sudo apt-get update +#sudo apt-get install -y build-essential git cmake ninja-build automake libtool texinfo autoconf + +with_tests=false +with_artifacts=false + + +while getopts 'ta' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + *) break + ;; + esac +done + +if [ ! 
-d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export CC=$(which clang-16) +export CXX=$(which clang++-16) +export CCACHE_DISABLE=1 + + +if [ ! -d "secp256k1" ]; then +git clone https://github.com/bitcoin-core/secp256k1.git +cd secp256k1 +secp256k1Path=`pwd` +git checkout v0.3.2 +./autogen.sh +./configure --enable-module-recovery --enable-static --disable-tests --disable-benchmark --with-pic +make -j12 +test $? -eq 0 || { echo "Can't compile secp256k1"; exit 1; } +cd .. +# ./.libs/libsecp256k1.a +# ./include +else + secp256k1Path=$(pwd)/secp256k1 + echo "Using compiled secp256k1" +fi + +if [ ! -d "libsodium" ]; then + export LIBSODIUM_FULL_BUILD=1 + git clone https://github.com/jedisct1/libsodium.git + cd libsodium + sodiumPath=`pwd` + git checkout 1.0.18 + ./autogen.sh + ./configure --with-pic --enable-static + make -j12 + test $? -eq 0 || { echo "Can't compile libsodium"; exit 1; } + cd .. +else + sodiumPath=$(pwd)/libsodium + echo "Using compiled libsodium" +fi + +if [ ! -d "openssl_3" ]; then + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + opensslPath=`pwd` + git checkout openssl-3.1.4 + ./config -static + make build_libs -j12 + test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; } + cd .. +else + opensslPath=$(pwd)/openssl_3 + echo "Using compiled openssl_3" +fi + +if [ ! -d "zlib" ]; then + git clone https://github.com/madler/zlib.git + cd zlib + zlibPath=`pwd` + ./configure --static + make -j12 + test $? -eq 0 || { echo "Can't compile zlib"; exit 1; } + cd .. +else + zlibPath=$(pwd)/zlib + echo "Using compiled zlib" +fi + +if [ ! -d "libmicrohttpd" ]; then + git clone https://git.gnunet.org/libmicrohttpd.git + cd libmicrohttpd + libmicrohttpdPath=`pwd` + ./autogen.sh + ./configure --enable-static --disable-tests --disable-benchmark --disable-shared --disable-https --with-pic + make -j12 + test $? -eq 0 || { echo "Can't compile libmicrohttpd"; exit 1; } + cd .. +else + libmicrohttpdPath=$(pwd)/libmicrohttpd + echo "Using compiled libmicrohttpd" +fi + +cmake -GNinja .. \ +-DPORTABLE=1 \ +-DCMAKE_BUILD_TYPE=Release \ +-DOPENSSL_FOUND=1 \ +-DOPENSSL_INCLUDE_DIR=$opensslPath/include \ +-DOPENSSL_CRYPTO_LIBRARY=$opensslPath/libcrypto.a \ +-DZLIB_FOUND=1 \ +-DZLIB_INCLUDE_DIR=$zlibPath \ +-DZLIB_LIBRARIES=$zlibPath/libz.a \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$secp256k1Path/include \ +-DSECP256K1_LIBRARY=$secp256k1Path/.libs/libsecp256k1.a \ +-DSODIUM_FOUND=1 \ +-DSODIUM_INCLUDE_DIR=$sodiumPath/src/libsodium/include \ +-DSODIUM_LIBRARY_RELEASE=$sodiumPath/src/libsodium/.libs/libsodium.a \ +-DMHD_FOUND=1 \ +-DMHD_INCLUDE_DIR=$libmicrohttpdPath/src/include \ +-DMHD_LIBRARY=$libmicrohttpdPath/src/microhttpd/.libs/libmicrohttpd.a + + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \ + test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \ + test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \ + test-fec test-tddb test-db test-validator-session-state + test $? 
-eq 0 || { echo "Can't compile ton"; exit 1; } +else +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +fi + +strip -g storage/storage-daemon/storage-daemon \ + storage/storage-daemon/storage-daemon-cli \ + blockchain-explorer/blockchain-explorer \ + crypto/fift \ + crypto/tlbc \ + crypto/func \ + crypto/create-state \ + validator-engine-console/validator-engine-console \ + tonlib/tonlib-cli \ + tonlib/libtonlibjson.so.0.5 \ + http/http-proxy \ + rldp-http-proxy/rldp-http-proxy \ + dht-server/dht-server \ + lite-client/lite-client \ + validator-engine/validator-engine \ + utils/generate-random-id \ + utils/json2tlo \ + adnl/adnl-proxy \ + emulator/libemulator.* + +test $? -eq 0 || { echo "Can't strip final binaries"; exit 1; } + +# simple binaries' test +./storage/storage-daemon/storage-daemon -V || exit 1 +./validator-engine/validator-engine -V || exit 1 +./lite-client/lite-client -V || exit 1 +./crypto/fift -V || exit 1 + +cd .. + +if [ "$with_artifacts" = true ]; then + rm -rf artifacts + mkdir artifacts + cp crypto/fift/lib artifacts/ + cp -R crypto/smartcont/ artifacts/ + mv build/tonlib/libtonlibjson.so.0.5 build/tonlib/libtonlibjson.so + cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli \ + build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/blockchain-explorer/blockchain-explorer \ + build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli \ + build/tonlib/libtonlibjson.so build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy \ + build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine \ + build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.so \ + artifacts + test $? -eq 0 || { echo "Can't copy final binaries"; exit 1; } + chmod +x artifacts/* + cp -R crypto/smartcont artifacts + cp -R crypto/fift/lib artifacts +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors|test-smartcont|test-adnl|test-validator-session-state|test-dht|test-rldp" + ctest --output-on-failure -E "test-adnl" +fi diff --git a/assembly/native/build-ubuntu-shared.sh b/assembly/native/build-ubuntu-shared.sh new file mode 100644 index 00000000..12c819cd --- /dev/null +++ b/assembly/native/build-ubuntu-shared.sh @@ -0,0 +1,122 @@ +#/bin/bash + +#sudo apt-get update +#sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + +with_tests=false +with_artifacts=false + + +while getopts 'ta' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export CC=$(which clang-16) +export CXX=$(which clang++-16) +export CCACHE_DISABLE=1 + +if [ ! -d "openssl_3" ]; then + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + opensslPath=`pwd` + git checkout openssl-3.1.4 + ./config + make build_libs -j12 + test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; } + cd .. 
+else + opensslPath=$(pwd)/openssl_3 + echo "Using compiled openssl_3" +fi + +cmake -GNinja .. \ +-DCMAKE_BUILD_TYPE=Release \ +-DOPENSSL_ROOT_DIR=$opensslPath \ +-DOPENSSL_INCLUDE_DIR=$opensslPath/include \ +-DOPENSSL_CRYPTO_LIBRARY=$opensslPath/libcrypto.so + + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \ + test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \ + test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \ + test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +fi + +strip -g storage/storage-daemon/storage-daemon \ + storage/storage-daemon/storage-daemon-cli \ + blockchain-explorer/blockchain-explorer \ + crypto/fift \ + crypto/tlbc \ + crypto/func \ + crypto/create-state \ + validator-engine-console/validator-engine-console \ + tonlib/tonlib-cli \ + tonlib/libtonlibjson.so.0.5 \ + http/http-proxy \ + rldp-http-proxy/rldp-http-proxy \ + dht-server/dht-server \ + lite-client/lite-client \ + validator-engine/validator-engine \ + utils/generate-random-id \ + utils/json2tlo \ + adnl/adnl-proxy \ + emulator/libemulator.* + +test $? -eq 0 || { echo "Can't strip final binaries"; exit 1; } + +# simple binaries' test +./storage/storage-daemon/storage-daemon -V || exit 1 +./validator-engine/validator-engine -V || exit 1 +./lite-client/lite-client -V || exit 1 +./crypto/fift -V || exit 1 + +cd .. + +if [ "$with_artifacts" = true ]; then + rm -rf artifacts + mkdir artifacts + mv build/tonlib/libtonlibjson.so.0.5 build/tonlib/libtonlibjson.so + cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli \ + build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/blockchain-explorer/blockchain-explorer \ + build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli \ + build/tonlib/libtonlibjson.so build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy \ + build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine \ + build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.so \ + artifacts + test $? 
-eq 0 || { echo "Can't copy final binaries"; exit 1; } + chmod +x artifacts/* + cp -R crypto/smartcont artifacts + cp -R crypto/fift/lib artifacts +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors|test-smartcont|test-adnl|test-validator-session-state|test-dht|test-rldp" + ctest --output-on-failure --timeout 1800 +fi \ No newline at end of file diff --git a/assembly/native/build-windows-github.bat b/assembly/native/build-windows-github.bat new file mode 100644 index 00000000..7cad8c7e --- /dev/null +++ b/assembly/native/build-windows-github.bat @@ -0,0 +1,2 @@ +call "C:\Program Files\Microsoft Visual Studio\2022\%1\VC\Auxiliary\Build\vcvars64.bat" +call build-windows.bat -t \ No newline at end of file diff --git a/assembly/native/build-windows.bat b/assembly/native/build-windows.bat new file mode 100644 index 00000000..2e3f2082 --- /dev/null +++ b/assembly/native/build-windows.bat @@ -0,0 +1,193 @@ +REM execute this script inside elevated (Run as Administrator) console "x64 Native Tools Command Prompt for VS 2022" + +echo off + +echo Installing chocolatey windows package manager... +@"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin" +choco -? +IF %errorlevel% NEQ 0 ( + echo Can't install chocolatey + exit /b %errorlevel% +) + +choco feature enable -n allowEmptyChecksums + +echo Installing pkgconfiglite... +choco install -y pkgconfiglite +IF errorlevel 1 ( + echo Can't install pkgconfiglite + exit /b %errorlevel% +) + +echo Installing ninja... +choco install -y ninja +IF errorlevel 1 ( + echo Can't install ninja + exit /b %errorlevel% +) + +if not exist "zlib" ( +git clone https://github.com/madler/zlib.git +cd zlib\contrib\vstudio\vc14 +msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v143 + +IF errorlevel 1 ( + echo Can't install zlib + exit /b %errorlevel% +) +cd ..\..\..\.. +) else ( +echo Using zlib... +) + +if not exist "secp256k1" ( +git clone https://github.com/libbitcoin/secp256k1.git +cd secp256k1\builds\msvc\vs2017 +msbuild /p:Configuration=StaticRelease -p:PlatformToolset=v143 -p:Platform=x64 +IF errorlevel 1 ( + echo Can't install secp256k1 + exit /b %errorlevel% +) +cd ..\..\..\.. +) else ( +echo Using secp256k1... +) + + +if not exist "libsodium" ( +curl -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip +IF errorlevel 1 ( + echo Can't download libsodium + exit /b %errorlevel% +) +unzip libsodium-1.0.18-stable-msvc.zip +) else ( +echo Using libsodium... +) + +if not exist "openssl-3.1.4" ( +curl -Lo openssl-3.1.4.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-3.1.4.zip +IF errorlevel 1 ( + echo Can't download OpenSSL + exit /b %errorlevel% +) +unzip -q openssl-3.1.4.zip +) else ( +echo Using openssl... +) + +if not exist "libmicrohttpd-0.9.77-w32-bin" ( +curl -Lo libmicrohttpd-0.9.77-w32-bin.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip +IF errorlevel 1 ( + echo Can't download libmicrohttpd + exit /b %errorlevel% +) +unzip -q libmicrohttpd-0.9.77-w32-bin.zip +) else ( +echo Using libmicrohttpd... 
+) + +if not exist "readline-5.0-1-lib" ( +curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip +IF errorlevel 1 ( + echo Can't download readline + exit /b %errorlevel% +) +unzip -q -d readline-5.0-1-lib readline-5.0-1-lib.zip +) else ( +echo Using readline... +) + + +set root=%cd% +echo %root% +set SODIUM_DIR=%root%\libsodium + +mkdir build +cd build +cmake -GNinja -DCMAKE_BUILD_TYPE=Release ^ +-DPORTABLE=1 ^ +-DSODIUM_USE_STATIC_LIBS=1 ^ +-DSECP256K1_FOUND=1 ^ +-DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include ^ +-DSECP256K1_LIBRARY=%root%\secp256k1\bin\x64\Release\v143\static\secp256k1.lib ^ +-DMHD_FOUND=1 ^ +-DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib ^ +-DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static ^ +-DZLIB_FOUND=1 ^ +-DZLIB_INCLUDE_DIR=%root%\zlib ^ +-DZLIB_LIBRARIES=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib ^ +-DOPENSSL_FOUND=1 ^ +-DOPENSSL_INCLUDE_DIR=%root%/openssl-3.1.4/x64/include ^ +-DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-3.1.4/x64/lib/libcrypto_static.lib ^ +-DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" .. +IF errorlevel 1 ( + echo Can't configure TON + exit /b %errorlevel% +) + +IF "%1"=="-t" ( +ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^ +tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^ +json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^ +test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^ +test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^ +test-fec test-tddb test-db test-validator-session-state +IF errorlevel 1 ( + echo Can't compile TON + exit /b %errorlevel% +) +) else ( +ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^ +tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^ +json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator +IF errorlevel 1 ( + echo Can't compile TON + exit /b %errorlevel% +) +) + +copy validator-engine\validator-engine.exe test +IF errorlevel 1 ( + echo validator-engine.exe does not exist + exit /b %errorlevel% +) + +IF "%1"=="-t" ( + echo Running tests... +REM ctest -C Release --output-on-failure -E "test-catchain|test-actors|test-validator-session-state" + ctest -C Release --output-on-failure --timeout 1800 + IF errorlevel 1 ( + echo Some tests failed + exit /b %errorlevel% + ) +) + + +echo Creating artifacts... +cd .. 
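+
+REM The for-loop below runs "strip -g" on each listed binary and copies it into artifacts\,
+REM then xcopy mirrors crypto\smartcont and crypto\fift\lib next to them.
+REM Typical invocation of this script (as done by build-windows-github.bat): build-windows.bat -t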
+mkdir artifacts +mkdir artifacts\smartcont +mkdir artifacts\lib + +for %%I in (build\storage\storage-daemon\storage-daemon.exe ^ +build\storage\storage-daemon\storage-daemon-cli.exe ^ +build\blockchain-explorer\blockchain-explorer.exe ^ +build\crypto\fift.exe ^ +build\crypto\tlbc.exe ^ +build\crypto\func.exe ^ +build\crypto\create-state.exe ^ +build\validator-engine-console\validator-engine-console.exe ^ +build\tonlib\tonlib-cli.exe ^ +build\tonlib\tonlibjson.dll ^ +build\http\http-proxy.exe ^ +build\rldp-http-proxy\rldp-http-proxy.exe ^ +build\dht-server\dht-server.exe ^ +build\lite-client\lite-client.exe ^ +build\validator-engine\validator-engine.exe ^ +build\utils\generate-random-id.exe ^ +build\utils\json2tlo.exe ^ +build\adnl\adnl-proxy.exe ^ +build\emulator\emulator.dll) do (strip -g %%I & copy %%I artifacts\) +xcopy /e /k /h /i crypto\smartcont artifacts\smartcont +xcopy /e /k /h /i crypto\fift\lib artifacts\lib diff --git a/assembly/nix/build-linux-arm64-nix.sh b/assembly/nix/build-linux-arm64-nix.sh new file mode 100644 index 00000000..bb859141 --- /dev/null +++ b/assembly/nix/build-linux-arm64-nix.sh @@ -0,0 +1,20 @@ +#/bin/bash + +nix-build --version +test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } + +cp assembly/nix/linux-arm64* . +cp assembly/nix/microhttpd.nix . +cp assembly/nix/openssl.nix . +export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +nix-build linux-arm64-static.nix +mkdir artifacts +cp ./result/bin/* artifacts/ +chmod +x artifacts/* +rm -rf result +nix-build linux-arm64-tonlib.nix +cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so +cp ./result/lib/libemulator.so artifacts/ +cp -r crypto/fift/lib artifacts/ +cp -r crypto/smartcont artifacts/ \ No newline at end of file diff --git a/assembly/nix/build-linux-x86-64-nix.sh b/assembly/nix/build-linux-x86-64-nix.sh new file mode 100644 index 00000000..eca6fe58 --- /dev/null +++ b/assembly/nix/build-linux-x86-64-nix.sh @@ -0,0 +1,20 @@ +#/bin/bash + +nix-build --version +test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } + +cp assembly/nix/linux-x86-64* . +cp assembly/nix/microhttpd.nix . +cp assembly/nix/openssl.nix . +export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +nix-build linux-x86-64-static.nix +mkdir artifacts +cp ./result/bin/* artifacts/ +chmod +x artifacts/* +rm -rf result +nix-build linux-x86-64-tonlib.nix +cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so +cp ./result/lib/libemulator.so artifacts/ +cp -r crypto/fift/lib artifacts/ +cp -r crypto/smartcont artifacts/ \ No newline at end of file diff --git a/assembly/nix/build-macos-nix.sh b/assembly/nix/build-macos-nix.sh new file mode 100644 index 00000000..fdf674a6 --- /dev/null +++ b/assembly/nix/build-macos-nix.sh @@ -0,0 +1,17 @@ +#/bin/bash + +nix-build --version +test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } + +cp assembly/nix/macos-* . 
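+
+# macos-static.nix builds the static binaries (collected from ./result-bin/bin below),
+# while macos-tonlib.nix produces libtonlibjson.dylib and libemulator.dylib.
+# The NIX_PATH pin below fixes nixpkgs to the 23.05 archive used by these expressions.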
+export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz +nix-build macos-static.nix +mkdir artifacts +cp ./result-bin/bin/* artifacts/ +chmod +x artifacts/* +rm -rf result-bin +nix-build macos-tonlib.nix +cp ./result/lib/libtonlibjson.dylib artifacts/ +cp ./result/lib/libemulator.dylib artifacts/ +cp -r crypto/fift/lib artifacts/ +cp -r crypto/smartcont artifacts/ \ No newline at end of file diff --git a/flake.lock b/assembly/nix/flakes/flake.lock similarity index 100% rename from flake.lock rename to assembly/nix/flakes/flake.lock diff --git a/flake.nix b/assembly/nix/flakes/flake.nix similarity index 100% rename from flake.nix rename to assembly/nix/flakes/flake.nix diff --git a/shell.nix b/assembly/nix/flakes/shell.nix similarity index 100% rename from shell.nix rename to assembly/nix/flakes/shell.nix diff --git a/assembly/nix/linux-arm64-static.nix b/assembly/nix/linux-arm64-static.nix new file mode 100644 index 00000000..616dfba5 --- /dev/null +++ b/assembly/nix/linux-arm64-static.nix @@ -0,0 +1,45 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +}: +let + microhttpdmy = (import ./microhttpd.nix) {}; +in +with import microhttpdmy; +stdenv.mkDerivation { + pname = "ton"; + version = "dev-bin"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ + cmake ninja git pkg-config + ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl microhttpdmy pkgsStatic.zlib pkgsStatic.libsodium.dev pkgsStatic.secp256k1 glibc.static + ]; + + makeStatic = true; + doCheck = true; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DBUILD_SHARED_LIBS=OFF" + "-DCMAKE_LINK_SEARCH_START_STATIC=ON" + "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + "-DMHD_FOUND=1" + "-DMHD_INCLUDE_DIR=${microhttpdmy}/usr/local/include" + "-DMHD_LIBRARY=${microhttpdmy}/usr/local/lib/libmicrohttpd.a" + "-DCMAKE_CTEST_ARGUMENTS=--timeout;1800" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-static" + ]; +} diff --git a/assembly/nix/linux-arm64-tonlib.nix b/assembly/nix/linux-arm64-tonlib.nix new file mode 100644 index 00000000..a753423b --- /dev/null +++ b/assembly/nix/linux-arm64-tonlib.nix @@ -0,0 +1,44 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz +{ + pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +}: +let + microhttpdmy = (import ./microhttpd.nix) {}; +in +with import microhttpdmy; +pkgs.llvmPackages_16.stdenv.mkDerivation { + pname = "ton"; + version = "dev-lib"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ + cmake ninja git pkg-config + ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl microhttpdmy pkgsStatic.zlib pkgsStatic.libsodium.dev pkgsStatic.secp256k1 + ]; + + dontAddStaticConfigureFlags = false; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DMHD_FOUND=1" + "-DMHD_INCLUDE_DIR=${microhttpdmy}/usr/local/include" + "-DMHD_LIBRARY=${microhttpdmy}/usr/local/lib/libmicrohttpd.a" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-fPIC" "-fcommon" + ]; + + ninjaFlags = [ + "tonlibjson" "emulator" + ]; +} diff --git a/assembly/nix/linux-x86-64-static.nix b/assembly/nix/linux-x86-64-static.nix new file mode 100644 index 00000000..616dfba5 --- /dev/null +++ b/assembly/nix/linux-x86-64-static.nix @@ -0,0 +1,45 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? 
import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +}: +let + microhttpdmy = (import ./microhttpd.nix) {}; +in +with import microhttpdmy; +stdenv.mkDerivation { + pname = "ton"; + version = "dev-bin"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ + cmake ninja git pkg-config + ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl microhttpdmy pkgsStatic.zlib pkgsStatic.libsodium.dev pkgsStatic.secp256k1 glibc.static + ]; + + makeStatic = true; + doCheck = true; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DBUILD_SHARED_LIBS=OFF" + "-DCMAKE_LINK_SEARCH_START_STATIC=ON" + "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + "-DMHD_FOUND=1" + "-DMHD_INCLUDE_DIR=${microhttpdmy}/usr/local/include" + "-DMHD_LIBRARY=${microhttpdmy}/usr/local/lib/libmicrohttpd.a" + "-DCMAKE_CTEST_ARGUMENTS=--timeout;1800" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-static" + ]; +} diff --git a/assembly/nix/linux-x86-64-tonlib.nix b/assembly/nix/linux-x86-64-tonlib.nix new file mode 100644 index 00000000..ac183d2b --- /dev/null +++ b/assembly/nix/linux-x86-64-tonlib.nix @@ -0,0 +1,54 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.11.tar.gz +# copy linux-x86-64-tonlib.nix to git root directory and execute: +# nix-build linux-x86-64-tonlib.nix +{ + pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +}: +let + system = builtins.currentSystem; + + nixos1909 = (import (builtins.fetchTarball { + url = "https://channels.nixos.org/nixos-19.09/nixexprs.tar.xz"; + sha256 = "1vp1h2gkkrckp8dzkqnpcc6xx5lph5d2z46sg2cwzccpr8ay58zy"; + }) { inherit system; }); + glibc227 = nixos1909.glibc // { pname = "glibc"; }; + stdenv227 = let + cc = pkgs.wrapCCWith { + cc = nixos1909.buildPackages.gcc-unwrapped; + libc = glibc227; + bintools = pkgs.binutils.override { libc = glibc227; }; + }; + in (pkgs.overrideCC pkgs.stdenv cc); + +in +stdenv227.mkDerivation { + pname = "ton"; + version = "dev-lib"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ cmake ninja git pkg-config ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl pkgsStatic.zlib pkgsStatic.libmicrohttpd.dev pkgsStatic.libsodium.dev pkgsStatic.secp256k1 + ]; + + dontAddStaticConfigureFlags = false; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-fPIC" + ]; + + ninjaFlags = [ + "tonlibjson" "emulator" + ]; +} diff --git a/assembly/nix/macos-static.nix b/assembly/nix/macos-static.nix new file mode 100644 index 00000000..e65ec1a8 --- /dev/null +++ b/assembly/nix/macos-static.nix @@ -0,0 +1,65 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? 
pkgs.stdenv +}: + +pkgs.llvmPackages_14.stdenv.mkDerivation { + pname = "ton"; + version = "dev-bin"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ cmake ninja git pkg-config ]; + + buildInputs = with pkgs; + lib.forEach [ + secp256k1 libsodium.dev libmicrohttpd.dev gmp.dev nettle.dev libtasn1.dev libidn2.dev libunistring.dev gettext (gnutls.override { withP11-kit = false; }).dev + ] + (x: x.overrideAttrs(oldAttrs: rec { configureFlags = (oldAttrs.configureFlags or []) ++ [ "--enable-static" "--disable-shared" "--disable-tests" ]; dontDisableStatic = true; })) + ++ [ + darwin.apple_sdk.frameworks.CoreFoundation + (openssl.override { static = true; }).dev + (zlib.override { shared = false; }).dev + (libiconv.override { enableStatic = true; enableShared = false; }) + ]; + + + dontAddStaticConfigureFlags = true; + makeStatic = true; + doCheck = true; + + configureFlags = []; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DCMAKE_CROSSCOMPILING=OFF" + "-DCMAKE_LINK_SEARCH_START_STATIC=ON" + "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + "-DBUILD_SHARED_LIBS=OFF" + "-DCMAKE_CXX_FLAGS=-stdlib=libc++" + "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.3" + "-DCMAKE_CTEST_ARGUMENTS=--timeout;1800" + ]; + + LDFLAGS = [ + "-static-libstdc++" + "-framework CoreFoundation" + ]; + + postInstall = '' + moveToOutput bin "$bin" + ''; + + preFixup = '' + for fn in "$bin"/bin/* "$out"/lib/*.dylib; do + echo Fixing libc++ in "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++.1 | cut -d' ' -f1 | xargs)" libc++.1.dylib "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++abi.1 | cut -d' ' -f1 | xargs)" libc++abi.dylib "$fn" + done + ''; + outputs = [ "bin" "out" ]; +} \ No newline at end of file diff --git a/assembly/nix/macos-tonlib.nix b/assembly/nix/macos-tonlib.nix new file mode 100644 index 00000000..c362de4e --- /dev/null +++ b/assembly/nix/macos-tonlib.nix @@ -0,0 +1,55 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? 
pkgs.stdenv +}: + +pkgs.llvmPackages_14.stdenv.mkDerivation { + pname = "ton"; + version = "dev-lib"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ cmake ninja git pkg-config ]; + + buildInputs = with pkgs; + lib.forEach [ + secp256k1 libsodium.dev libmicrohttpd.dev gmp.dev nettle.dev libtasn1.dev libidn2.dev libunistring.dev gettext (gnutls.override { withP11-kit = false; }).dev + ] (x: x.overrideAttrs(oldAttrs: rec { configureFlags = (oldAttrs.configureFlags or []) ++ [ "--enable-static" "--disable-shared" "--disable-tests" ]; dontDisableStatic = true; })) + ++ [ + darwin.apple_sdk.frameworks.CoreFoundation + (openssl.override { static = true; }).dev + (zlib.override { shared = false; }).dev + (libiconv.override { enableStatic = true; enableShared = false; }) + ]; + + dontAddStaticConfigureFlags = true; + + configureFlags = []; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DCMAKE_CXX_FLAGS=-stdlib=libc++" + "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.3" + ]; + + LDFLAGS = [ + "-static-libstdc++" + "-framework CoreFoundation" + ]; + + ninjaFlags = [ + "tonlibjson" "emulator" + ]; + + preFixup = '' + for fn in $out/bin/* $out/lib/*.dylib; do + echo Fixing libc++ in "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++.1 | cut -d' ' -f1 | xargs)" libc++.1.dylib "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++abi.1 | cut -d' ' -f1 | xargs)" libc++abi.dylib "$fn" + done + ''; +} \ No newline at end of file diff --git a/assembly/nix/microhttpd.nix b/assembly/nix/microhttpd.nix new file mode 100644 index 00000000..4f871425 --- /dev/null +++ b/assembly/nix/microhttpd.nix @@ -0,0 +1,28 @@ +{ pkgs ? import { system = builtins.currentSystem; } +, stdenv ? pkgs.stdenv +, fetchgit ? pkgs.fetchgit +}: + +stdenv.mkDerivation rec { + name = "microhttpdmy"; + + + src = fetchgit { + url = "https://git.gnunet.org/libmicrohttpd.git"; + rev = "refs/tags/v0.9.77"; + sha256 = "sha256-x+nfB07PbZwBlFc6kZZFYiRpk0a3QN/ByHB+hC8na/o="; + }; + + nativeBuildInputs = with pkgs; [ automake libtool autoconf texinfo ]; + + buildInputs = with pkgs; [ ]; + + configurePhase = '' + ./autogen.sh + ./configure --enable-static --disable-tests --disable-benchmark --disable-shared --disable-https --with-pic + ''; + + installPhase = '' + make install DESTDIR=$out + ''; +} diff --git a/assembly/nix/openssl.nix b/assembly/nix/openssl.nix new file mode 100644 index 00000000..8d30aa50 --- /dev/null +++ b/assembly/nix/openssl.nix @@ -0,0 +1,30 @@ +{ pkgs ? import { system = builtins.currentSystem; } +, stdenv ? pkgs.stdenv +, fetchFromGitHub ? 
pkgs.fetchFromGitHub +}: + +stdenv.mkDerivation rec { + name = "opensslmy"; + + src = fetchFromGitHub { + owner = "openssl"; + repo = "openssl"; + rev = "refs/tags/openssl-3.1.4"; + sha256 = "sha256-Vvf1wiNb4ikg1lIS9U137aodZ2JzM711tSWMJFYWtWI="; + }; + + nativeBuildInputs = with pkgs; [ perl ]; + + buildInputs = with pkgs; [ ]; + + postPatch = '' + patchShebangs Configure + ''; + + configurePhase = '' + ./Configure no-shared + ''; + installPhase = '' + make install DESTDIR=$out + ''; +} diff --git a/.github/script/fift-func-wasm-build-ubuntu.sh b/assembly/wasm/fift-func-wasm-build-ubuntu.sh old mode 100755 new mode 100644 similarity index 71% rename from .github/script/fift-func-wasm-build-ubuntu.sh rename to assembly/wasm/fift-func-wasm-build-ubuntu.sh index feac19e3..9ca23cc0 --- a/.github/script/fift-func-wasm-build-ubuntu.sh +++ b/assembly/wasm/fift-func-wasm-build-ubuntu.sh @@ -1,7 +1,24 @@ # The script builds funcfift compiler to WASM -# dependencies: -#sudo apt-get install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libevent-dev +# Execute these prerequisites first +# sudo apt update +# sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev \ +# libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip \ +# nodejs libsecp256k1-dev libsodium-dev automake libtool + +# wget https://apt.llvm.org/llvm.sh +# chmod +x llvm.sh +# sudo ./llvm.sh 16 all + +with_artifacts=false + +while getopts 'a' flag; do + case "${flag}" in + a) with_artifacts=true ;; + *) break + ;; + esac +done export CC=$(which clang-16) export CXX=$(which clang++-16) @@ -39,8 +56,7 @@ mkdir build cd build cmake -GNinja -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_CXX_STANDARD=17 \ --DZLIB_LIBRARY=/usr/lib/x86_64-linux-gnu/libz.so \ --DZLIB_INCLUDE_DIR=$ZLIB_DIR \ +-DOPENSSL_FOUND=1 \ -DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \ @@ -62,8 +78,9 @@ cd emsdk ./emsdk install 3.1.19 ./emsdk activate 3.1.19 EMSDK_DIR=`pwd` +ls $EMSDK_DIR -source $EMSDK_DIR/emsdk_env.sh +. $EMSDK_DIR/emsdk_env.sh export CC=$(which emcc) export CXX=$(which em++) export CCACHE_DISABLE=1 @@ -71,7 +88,7 @@ export CCACHE_DISABLE=1 cd ../openssl make clean -emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test no-ui +emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile sed -i 's/-ldl//g' Makefile sed -i 's/-O3/-Os/g' Makefile @@ -101,22 +118,42 @@ test $? 
-eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; } cd ../build emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \ --DZLIB_LIBRARY=$ZLIB_DIR/libz.a \ +-DZLIB_FOUND=1 \ +-DZLIB_LIBRARIES=$ZLIB_DIR/libz.a \ -DZLIB_INCLUDE_DIR=$ZLIB_DIR \ +-DOPENSSL_FOUND=1 \ -DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a \ -DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a \ -DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake \ -DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" \ +-DSECP256K1_FOUND=1 \ -DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include \ -DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a \ -DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include \ -DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \ --DSODIUM_LIBRARY_DEBUG=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \ -DSODIUM_USE_STATIC_LIBS=ON .. test $? -eq 0 || { echo "Can't configure TON with emmake "; exit 1; } cp -R ../crypto/smartcont ../crypto/fift/lib crypto emmake make -j16 funcfiftlib func fift tlbc emulator-emscripten + +test $? -eq 0 || { echo "Can't compile TON with emmake "; exit 1; } + +if [ "$with_artifacts" = true ]; then + echo "Creating artifacts..." + cd .. + rm -rf artifacts + mkdir artifacts + ls build/crypto + cp build/crypto/fift* artifacts + cp build/crypto/func* artifacts + cp build/crypto/tlbc* artifacts + cp build/emulator/emulator-emscripten* artifacts + cp -R crypto/smartcont artifacts + cp -R crypto/fift/lib artifacts +fi + + diff --git a/blockchain-explorer/CMakeLists.txt b/blockchain-explorer/CMakeLists.txt index fc94e709..8aae8805 100644 --- a/blockchain-explorer/CMakeLists.txt +++ b/blockchain-explorer/CMakeLists.txt @@ -14,15 +14,28 @@ set(BLOCHAIN_EXPLORER_SOURCE add_executable(blockchain-explorer ${BLOCHAIN_EXPLORER_SOURCE}) if (NIX) - find_package(PkgConfig REQUIRED) - pkg_check_modules(MHD libmicrohttpd) - target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIRS} ${MHD_STATIC_INCLUDE_DIRS}) - target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARIES} ${MHD_STATIC_LIBRARIES}) + if (MHD_FOUND) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + else() + find_package(PkgConfig REQUIRED) + pkg_check_modules(MHD libmicrohttpd) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR} ${MHD_STATIC_INCLUDE_DIRS}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARIES} ${MHD_STATIC_LIBRARIES}) + endif() else() - find_package(MHD) - target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIRS}) - target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARIES}) + if (MHD_FOUND) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + else() + find_package(MHD) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + endif() endif() 
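+# MHD_FOUND can be pre-set by the portable build scripts (-DMHD_FOUND=1 together with
+# MHD_INCLUDE_DIR and MHD_LIBRARY), in which case the pkg-config / find_package(MHD)
+# probes above are skipped and the supplied paths are used directly.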
+target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) +target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + install(TARGETS blockchain-explorer RUNTIME DESTINATION bin) diff --git a/catchain/catchain-receiver.cpp b/catchain/catchain-receiver.cpp index 482cfb43..c8206de9 100644 --- a/catchain/catchain-receiver.cpp +++ b/catchain/catchain-receiver.cpp @@ -287,7 +287,9 @@ void CatChainReceiverImpl::add_block_cont_3(tl_object_ptrdelivered()); + LOG_CHECK(last_sent_block_->delivered()) + << "source=" << last_sent_block_->get_source_id() << " ill=" << last_sent_block_->is_ill() + << " height=" << last_sent_block_->get_height(); } active_send_ = false; diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index 62b0d216..0871d250 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -325,23 +325,20 @@ endif() if (MSVC) find_package(Sodium REQUIRED) target_compile_definitions(ton_crypto PUBLIC SODIUM_STATIC) - target_include_directories(ton_crypto_core PUBLIC $) - target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARIES}) -elseif (ANDROID) + target_include_directories(ton_crypto_core PUBLIC $) + target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY}) +elseif (ANDROID OR EMSCRIPTEN) target_include_directories(ton_crypto_core PUBLIC $) target_link_libraries(ton_crypto PUBLIC $) else() - if (NOT USE_EMSCRIPTEN) + if (NOT SODIUM_FOUND) find_package(Sodium REQUIRED) - target_include_directories(ton_crypto_core PUBLIC $) - target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARIES}) else() - target_include_directories(ton_crypto_core PUBLIC $) - target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY}) - endif() - if (NOT APPLE AND NOT USE_EMSCRIPTEN) - target_link_libraries(ton_crypto_core PUBLIC secp256k1) + message(STATUS "Using Sodium ${SODIUM_LIBRARY_RELEASE}") endif() + target_compile_definitions(ton_crypto PUBLIC SODIUM_STATIC) + target_include_directories(ton_crypto_core PUBLIC $) + target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY}) endif() target_include_directories(ton_crypto_core PUBLIC $) @@ -356,7 +353,7 @@ add_executable(test-ed25519-crypto test/test-ed25519-crypto.cpp) target_include_directories(test-ed25519-crypto PUBLIC $) target_link_libraries(test-ed25519-crypto PUBLIC ton_crypto) -add_library(fift-lib ${FIFT_SOURCE}) +add_library(fift-lib STATIC ${FIFT_SOURCE}) target_include_directories(fift-lib PUBLIC $) target_link_libraries(fift-lib PUBLIC ton_crypto ton_db tdutils ton_block) if (USE_EMSCRIPTEN) @@ -400,6 +397,7 @@ if (USE_EMSCRIPTEN) target_link_options(funcfiftlib PRIVATE -sIGNORE_MISSING_MAIN=1) target_link_options(funcfiftlib PRIVATE -sAUTO_NATIVE_LIBRARIES=0) target_link_options(funcfiftlib PRIVATE -sMODULARIZE=1) + target_link_options(funcfiftlib PRIVATE -sTOTAL_MEMORY=33554432) target_link_options(funcfiftlib PRIVATE -sALLOW_MEMORY_GROWTH=1) target_link_options(funcfiftlib PRIVATE -sALLOW_TABLE_GROWTH=1) target_link_options(funcfiftlib PRIVATE --embed-file ${CMAKE_CURRENT_SOURCE_DIR}/fift/lib@/fiftlib) @@ -506,7 +504,7 @@ if (NOT CMAKE_CROSSCOMPILING OR USE_EMSCRIPTEN) GenFif(DEST smartcont/auto/simple-wallet-ext-code SOURCE smartcont/simple-wallet-ext-code.fc NAME simple-wallet-ext) endif() -add_library(smc-envelope ${SMC_ENVELOPE_SOURCE}) +add_library(smc-envelope STATIC ${SMC_ENVELOPE_SOURCE}) target_include_directories(smc-envelope PUBLIC $) target_link_libraries(smc-envelope PUBLIC ton_crypto PRIVATE tdutils ton_block) if 
(NOT CMAKE_CROSSCOMPILING) diff --git a/crypto/test/test-smartcont.cpp b/crypto/test/test-smartcont.cpp index 98534bc5..7f512cea 100644 --- a/crypto/test/test-smartcont.cpp +++ b/crypto/test/test-smartcont.cpp @@ -489,7 +489,7 @@ void do_test_wallet(int revision) { auto address = std::move(res.address); auto iwallet = std::move(res.wallet); auto public_key = priv_key.get_public_key().move_as_ok().as_octet_string(); - ; + check_wallet_state(iwallet, 1, 123, public_key); // lets send a lot of messages @@ -1026,7 +1026,7 @@ class CheckedDns { } return action; }); - auto query = dns_->create_update_query(key_.value(), smc_actions).move_as_ok(); + auto query = dns_->create_update_query(key_.value(), smc_actions, query_id_++).move_as_ok(); CHECK(dns_.write().send_external_message(std::move(query)).code == 0); } map_dns_.update(entries); @@ -1081,6 +1081,7 @@ class CheckedDns { using ManualDns = ton::ManualDns; td::optional key_; td::Ref dns_; + td::uint32 query_id_ = 1; // Query id serve as "valid until", but in tests now() == 0 MapDns map_dns_; td::optional combined_map_dns_; diff --git a/crypto/vm/boc.h b/crypto/vm/boc.h index 1bff5b25..09ae1b66 100644 --- a/crypto/vm/boc.h +++ b/crypto/vm/boc.h @@ -52,6 +52,7 @@ class NewCellStorageStat { bool operator==(const Stat& other) const { return key() == other.key(); } + Stat(const Stat& other) = default; Stat& operator=(const Stat& other) = default; Stat& operator+=(const Stat& other) { cells += other.cells; diff --git a/example/android/README.md b/example/android/README.md index f17ba9d2..cf12ba30 100644 --- a/example/android/README.md +++ b/example/android/README.md @@ -6,27 +6,9 @@ Prerequisite: installed Java and set environment variable JAVA_HOME. ```bash git clone --recursive https://github.com/ton-blockchain/ton.git cd ton -wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip -unzip android-ndk-r25b-linux.zip -export JAVA_AWT_LIBRARY=NotNeeded -export JAVA_JVM_LIBRARY=NotNeeded -export JAVA_INCLUDE_PATH=${JAVA_HOME}/include -export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include -export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux - -export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b -export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto -export SECP256K1_INCLUDE_DIR=$(pwd)/example/android/third_party/secp256k1/include -export SECP256K1_LIBRARY=$(pwd)/example/android/third_party/secp256k1/.libs/libsecp256k1.a -export SODIUM_INCLUDE_DIR=$(pwd)/example/android/third_party/libsodium/libsodium-android-westmere/include -export SODIUM_LIBRARY=$(pwd)/example/android/third_party/libsodium/libsodium-android-westmere/lib/libsodium.a - -rm -rf example/android/src/drinkless/org/ton/TonApi.java -cd example/android/ -cmake -GNinja -DTON_ONLY_TONLIB=ON . -ninja prepare_cross_compiling -rm CMakeCache.txt -./build-all.sh +cp assembly/android/build-android-tonlib.sh . +chmod +x build-android-tonlib.sh +sudo -E ./build-android-tonlib.sh ``` # Generation of Tonlib libraries for iOS in Xcode diff --git a/example/android/build-all.sh b/example/android/build-all.sh index f436e361..6f97dec0 100755 --- a/example/android/build-all.sh +++ b/example/android/build-all.sh @@ -1,4 +1,6 @@ #!/bin/bash +echo ANDROID_NDK_ROOT = $ANDROID_NDK_ROOT + echo Building tonlib for x86... echo ARCH="x86" ./build.sh || exit 1 diff --git a/example/android/build.sh b/example/android/build.sh index 7f170dbc..06217255 100755 --- a/example/android/build.sh +++ b/example/android/build.sh @@ -42,14 +42,22 @@ ARCH=$ABI mkdir -p build-$ARCH cd build-$ARCH -cmake .. 
-GNinja -DPORTABLE=1 \ --DANDROID_ABI=x86 -DANDROID_PLATFORM=android-32 -DANDROID_NDK=${ANDROID_NDK_ROOT} \ --DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ --DCMAKE_BUILD_TYPE=Release -DANDROID_ABI=${ABI} \ --DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ORIG_ARCH} -DTON_ARCH="" \ +cmake .. -GNinja \ +-DPORTABLE=1 \ -DTON_ONLY_TONLIB=ON \ --DSECP256K1_INCLUDE_DIR=${SECP256K1_INCLUDE_DIR} -DSECP256K1_LIBRARY=${SECP256K1_LIBRARY} \ --DSODIUM_INCLUDE_DIR=${SODIUM_INCLUDE_DIR} -DSODIUM_LIBRARY_RELEASE=${SODIUM_LIBRARY_RELEASE} \ +-DTON_ARCH="" \ +-DANDROID_ABI=x86 \ +-DANDROID_PLATFORM=android-32 \ +-DANDROID_NDK=${ANDROID_NDK_ROOT} \ +-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ +-DCMAKE_BUILD_TYPE=Release \ +-DANDROID_ABI=${ABI} \ +-DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ORIG_ARCH} \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=${SECP256K1_INCLUDE_DIR} \ +-DSECP256K1_LIBRARY=${SECP256K1_LIBRARY} \ +-DSODIUM_INCLUDE_DIR=${SODIUM_INCLUDE_DIR} \ +-DSODIUM_LIBRARY_RELEASE=${SODIUM_LIBRARY_RELEASE} \ -DSODIUM_USE_STATIC_LIBS=1 \ -DBLST_LIB=${BLST_LIBRARY} || exit 1 diff --git a/lite-client/CMakeLists.txt b/lite-client/CMakeLists.txt index 53e09d77..598a8d28 100644 --- a/lite-client/CMakeLists.txt +++ b/lite-client/CMakeLists.txt @@ -1,6 +1,6 @@ cmake_minimum_required(VERSION 3.5 FATAL_ERROR) -add_library(lite-client-common lite-client-common.cpp lite-client-common.h) +add_library(lite-client-common STATIC lite-client-common.cpp lite-client-common.h) target_link_libraries(lite-client-common PUBLIC tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_crypto ton_block) add_executable(lite-client lite-client.cpp lite-client.h) diff --git a/tdnet/td/net/TcpListener.cpp b/tdnet/td/net/TcpListener.cpp index 7b7364ba..e711cbbd 100644 --- a/tdnet/td/net/TcpListener.cpp +++ b/tdnet/td/net/TcpListener.cpp @@ -46,9 +46,11 @@ void TcpListener::start_up() { } void TcpListener::tear_down() { - // unsubscribe from socket updates - // nb: interface will be changed - td::actor::SchedulerContext::get()->get_poll().unsubscribe(server_socket_fd_.get_poll_info().get_pollable_fd_ref()); + if (!server_socket_fd_.empty()) { + // unsubscribe from socket updates + // nb: interface will be changed + td::actor::SchedulerContext::get()->get_poll().unsubscribe(server_socket_fd_.get_poll_info().get_pollable_fd_ref()); + } } void TcpListener::loop() { diff --git a/tdnet/test/net-test.cpp b/tdnet/test/net-test.cpp index bb084a67..d20be504 100644 --- a/tdnet/test/net-test.cpp +++ b/tdnet/test/net-test.cpp @@ -158,9 +158,11 @@ void run_server(int from_port, int to_port, bool is_first, bool use_tcp) { TEST(Net, PingPong) { SET_VERBOSITY_LEVEL(VERBOSITY_NAME(ERROR)); + int port1 = td::Random::fast(10000, 10999); + int port2 = td::Random::fast(11000, 11999); for (auto use_tcp : {false, true}) { - auto a = td::thread([use_tcp] { run_server(8091, 8092, true, use_tcp); }); - auto b = td::thread([use_tcp] { run_server(8092, 8091, false, use_tcp); }); + auto a = td::thread([=] { run_server(port1, port2, true, use_tcp); }); + auto b = td::thread([=] { run_server(port2, port1, false, use_tcp); }); a.join(); b.join(); } diff --git a/tdutils/td/utils/as.h b/tdutils/td/utils/as.h index c60c74e2..6015af29 100644 --- a/tdutils/td/utils/as.h +++ b/tdutils/td/utils/as.h @@ -76,12 +76,7 @@ class ConstAs { } // namespace detail -// no std::is_trivially_copyable in libstdc++ before 5.0 -#if __GLIBCXX__ -#define TD_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T) -#else #define 
TD_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable::value -#endif template = 0> diff --git a/test/test-catchain.cpp b/test/test-catchain.cpp index 149ea3e3..3131c2b9 100644 --- a/test/test-catchain.cpp +++ b/test/test-catchain.cpp @@ -186,6 +186,7 @@ class CatChainInst : public td::actor::Actor { void create_fork() { auto height = height_ - 1; //td::Random::fast(0, height_ - 1); + LOG(WARNING) << "Creating fork, source_id=" << idx_ << ", height=" << height; auto sum = prev_values_[height] + 1; td::uint64 x[2]; @@ -241,7 +242,8 @@ int main(int argc, char *argv[]) { td::actor::send_closure(adnl, &ton::adnl::Adnl::register_network_manager, network_manager.get()); }); - for (td::uint32 att = 0; att < 10; att++) { + for (td::uint32 att = 0; att < 20; att++) { + LOG(WARNING) << "Test #" << att; nodes.resize(total_nodes); scheduler.run_in_context([&] { @@ -296,7 +298,10 @@ int main(int argc, char *argv[]) { std::cout << "value=" << n.get_actor_unsafe().value() << std::endl; } - scheduler.run_in_context([&] { td::actor::send_closure(inst[0], &CatChainInst::create_fork); }); + td::uint32 fork_cnt = att < 10 ? 1 : (att - 10) / 5 + 2; + for (td::uint32 idx = 0; idx < fork_cnt; ++idx) { + scheduler.run_in_context([&] { td::actor::send_closure(inst[idx], &CatChainInst::create_fork); }); + } t = td::Timestamp::in(1.0); while (scheduler.run(1)) { diff --git a/tonlib/test/offline.cpp b/tonlib/test/offline.cpp index 35a95f5b..0fedc865 100644 --- a/tonlib/test/offline.cpp +++ b/tonlib/test/offline.cpp @@ -467,15 +467,20 @@ TEST(Tonlib, KeysApi) { make_object(make_object(key->public_key_, key->secret_.copy()))) .move_as_ok(); - auto err1 = sync_send(client, make_object( - new_local_password.copy(), td::SecureString("wrong password"), - make_object(copy_word_list()))) - .move_as_error(); + auto err1 = sync_send( + client, make_object(new_local_password.copy(), td::SecureString("wrong password"), + make_object(copy_word_list()))); + if (err1.is_ok()) { + if (err1.ok()->public_key_ != key->public_key_) { + err1 = td::Status::Error("imported key successfully, but the public key is different"); + } + } + err1.ensure_error(); auto err2 = sync_send(client, make_object(new_local_password.copy(), td::SecureString(), - make_object(copy_word_list()))) - .move_as_error(); - LOG(INFO) << err1 << " | " << err2; + make_object(copy_word_list()))); + err2.ensure_error(); + LOG(INFO) << err1.move_as_error() << " | " << err2.move_as_error(); auto imported_key = sync_send(client, make_object(new_local_password.copy(), mnemonic_password.copy(), make_object(copy_word_list()))) diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 9ec664bb..bee7ef40 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -4995,6 +4995,8 @@ td::Status TonlibClient::do_request(const tonlib_api::importKey& request, if (!request.exported_key_) { return TonlibError::EmptyField("exported_key"); } + // Note: the mnemonic is considered valid if a certain hash starts with zero byte (see Mnemonic::is_basic_seed()) + // Therefore, importKey with invalid password has 1/256 chance to return OK TRY_RESULT(key, key_storage_.import_key(std::move(request.local_password_), std::move(request.mnemonic_password_), KeyStorage::ExportedKey{std::move(request.exported_key_->word_list_)})); TRY_RESULT(key_bytes, public_key_from_bytes(key.public_key.as_slice())); From edb0d0f0aab98ee8a08d6b100f43dec205b5aa5a Mon Sep 17 00:00:00 2001 From: Andrey Pfau Date: Tue, 16 Jan 2024 10:59:47 +0400 Subject: [PATCH 55/71] Add 
Editorconfig (#800) * editorconfig * indent 2 * .editorconfig update --- .editorconfig | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .editorconfig diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..78c6ddee --- /dev/null +++ b/.editorconfig @@ -0,0 +1,8 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +indent_size = 2 From 4303e49c93939eeeb51913195f54dc525f50e84c Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Tue, 16 Jan 2024 10:59:16 +0300 Subject: [PATCH 56/71] Postpone addition of overlay for block broadcasting (#842) This reverts commit a52045bd91004bccabf10fd159371ffbbf60ffad. --- adnl/adnl-peer-table.hpp | 4 - adnl/adnl.h | 2 - tl/generate/scheme/ton_api.tl | 2 - tl/generate/scheme/ton_api.tlo | Bin 86348 -> 86116 bytes ton/ton-types.h | 8 -- validator/CMakeLists.txt | 4 +- validator/full-node-private-overlay.cpp | 175 ------------------------ validator/full-node-private-overlay.hpp | 86 ------------ validator/full-node.cpp | 51 ------- validator/full-node.hpp | 7 - 10 files changed, 1 insertion(+), 338 deletions(-) delete mode 100644 validator/full-node-private-overlay.cpp delete mode 100644 validator/full-node-private-overlay.hpp diff --git a/adnl/adnl-peer-table.hpp b/adnl/adnl-peer-table.hpp index 1c30b84c..2a27a802 100644 --- a/adnl/adnl-peer-table.hpp +++ b/adnl/adnl-peer-table.hpp @@ -77,10 +77,6 @@ class AdnlPeerTableImpl : public AdnlPeerTable { td::actor::ActorId channel) override; void unregister_channel(AdnlChannelIdShort id) override; - void check_id_exists(AdnlNodeIdShort id, td::Promise promise) override { - promise.set_value(local_ids_.count(id)); - } - void write_new_addr_list_to_db(AdnlNodeIdShort local_id, AdnlNodeIdShort peer_id, AdnlDbItem node, td::Promise promise) override; void get_addr_list_from_db(AdnlNodeIdShort local_id, AdnlNodeIdShort peer_id, diff --git a/adnl/adnl.h b/adnl/adnl.h index a1c39d5e..b7dad216 100644 --- a/adnl/adnl.h +++ b/adnl/adnl.h @@ -97,8 +97,6 @@ class Adnl : public AdnlSenderInterface { virtual void add_id_ex(AdnlNodeIdFull id, AdnlAddressList addr_list, td::uint8 cat, td::uint32 mode) = 0; virtual void del_id(AdnlNodeIdShort id, td::Promise promise) = 0; - virtual void check_id_exists(AdnlNodeIdShort id, td::Promise promise) = 0; - // subscribe to (some) messages(+queries) to this local id virtual void subscribe(AdnlNodeIdShort dst, std::string prefix, std::unique_ptr callback) = 0; virtual void unsubscribe(AdnlNodeIdShort dst, std::string prefix) = 0; diff --git a/tl/generate/scheme/ton_api.tl b/tl/generate/scheme/ton_api.tl index 7c9d9d5d..346d9152 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -394,8 +394,6 @@ tonNode.newShardBlockBroadcast block:tonNode.newShardBlock = tonNode.Broadcast; tonNode.shardPublicOverlayId workchain:int shard:long zero_state_file_hash:int256 = tonNode.ShardPublicOverlayId; -tonNode.privateBlockOverlayId zero_state_file_hash:int256 nodes:(vector int256) = tonNode.PrivateBlockOverlayId; - tonNode.keyBlocks blocks:(vector tonNode.blockIdExt) incomplete:Bool error:Bool = tonNode.KeyBlocks; ton.blockId root_cell_hash:int256 file_hash:int256 = ton.BlockId; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index 34f31816487195f10af67445b0923d8f5474c3f3..5b8e1c725e39ceff4097e4a654c796c9f84ff73e 100644 GIT binary patch delta 41 scmX@Ji1o<=R^CUm^{p77;NM1GXWh*`x-Kk?HJdLQZr{7vV)4`mrcA!aEVVg6*M)_#cJoxj?R#}mv=m^|!oUDhbMu&x57Xq<55|)(MA@)_L^eCT 
HS4ji_gWEXS diff --git a/ton/ton-types.h b/ton/ton-types.h index 24d54259..867584fa 100644 --- a/ton/ton-types.h +++ b/ton/ton-types.h @@ -353,14 +353,6 @@ struct BlockBroadcast { td::uint32 validator_set_hash; td::BufferSlice data; td::BufferSlice proof; - - BlockBroadcast clone() const { - std::vector new_signatures; - for (const BlockSignature& s : signatures) { - new_signatures.emplace_back(s.node, s.signature.clone()); - } - return {block_id, std::move(new_signatures), catchain_seqno, validator_set_hash, data.clone(), proof.clone()}; - } }; struct Ed25519_PrivateKey { diff --git a/validator/CMakeLists.txt b/validator/CMakeLists.txt index 8de60081..4ecc865c 100644 --- a/validator/CMakeLists.txt +++ b/validator/CMakeLists.txt @@ -143,9 +143,7 @@ set(FULL_NODE_SOURCE full-node-master.h full-node-master.hpp full-node-master.cpp - full-node-private-overlay.hpp - full-node-private-overlay.cpp - + net/download-block.hpp net/download-block.cpp net/download-block-new.hpp diff --git a/validator/full-node-private-overlay.cpp b/validator/full-node-private-overlay.cpp deleted file mode 100644 index ea72230b..00000000 --- a/validator/full-node-private-overlay.cpp +++ /dev/null @@ -1,175 +0,0 @@ -/* - This file is part of TON Blockchain Library. - - TON Blockchain Library is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 2 of the License, or - (at your option) any later version. - - TON Blockchain Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with TON Blockchain Library. If not, see . 
-*/ -#pragma once - -#include "full-node-private-overlay.hpp" -#include "ton/ton-tl.hpp" -#include "common/delay.h" - -namespace ton { - -namespace validator { - -namespace fullnode { - -void FullNodePrivateOverlay::process_broadcast(PublicKeyHash, ton_api::tonNode_blockBroadcast &query) { - std::vector signatures; - for (auto &sig : query.signatures_) { - signatures.emplace_back(BlockSignature{sig->who_, std::move(sig->signature_)}); - } - - BlockIdExt block_id = create_block_id(query.id_); - BlockBroadcast B{block_id, - std::move(signatures), - static_cast(query.catchain_seqno_), - static_cast(query.validator_set_hash_), - std::move(query.data_), - std::move(query.proof_)}; - - auto P = td::PromiseCreator::lambda([](td::Result R) { - if (R.is_error()) { - if (R.error().code() == ErrorCode::notready) { - LOG(DEBUG) << "dropped broadcast: " << R.move_as_error(); - } else { - LOG(INFO) << "dropped broadcast: " << R.move_as_error(); - } - } - }); - td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::prevalidate_block, std::move(B), - std::move(P)); -} - -void FullNodePrivateOverlay::process_broadcast(PublicKeyHash, ton_api::tonNode_newShardBlockBroadcast &query) { - td::actor::send_closure(validator_manager_, &ValidatorManagerInterface::new_shard_block, - create_block_id(query.block_->block_), query.block_->cc_seqno_, - std::move(query.block_->data_)); -} - -void FullNodePrivateOverlay::receive_broadcast(PublicKeyHash src, td::BufferSlice broadcast) { - auto B = fetch_tl_object(std::move(broadcast), true); - if (B.is_error()) { - return; - } - - ton_api::downcast_call(*B.move_as_ok(), [src, Self = this](auto &obj) { Self->process_broadcast(src, obj); }); -} - -void FullNodePrivateOverlay::send_shard_block_info(BlockIdExt block_id, CatchainSeqno cc_seqno, td::BufferSlice data) { - if (!inited_) { - return; - } - auto B = create_serialize_tl_object( - create_tl_object(create_tl_block_id(block_id), cc_seqno, std::move(data))); - if (B.size() <= overlay::Overlays::max_simple_broadcast_size()) { - td::actor::send_closure(overlays_, &overlay::Overlays::send_broadcast_ex, local_id_, overlay_id_, - local_id_.pubkey_hash(), 0, std::move(B)); - } else { - td::actor::send_closure(overlays_, &overlay::Overlays::send_broadcast_fec_ex, local_id_, overlay_id_, - local_id_.pubkey_hash(), overlay::Overlays::BroadcastFlagAnySender(), std::move(B)); - } -} - -void FullNodePrivateOverlay::send_broadcast(BlockBroadcast broadcast) { - if (!inited_) { - return; - } - std::vector> sigs; - for (auto &sig : broadcast.signatures) { - sigs.emplace_back(create_tl_object(sig.node, sig.signature.clone())); - } - auto B = create_serialize_tl_object( - create_tl_block_id(broadcast.block_id), broadcast.catchain_seqno, broadcast.validator_set_hash, std::move(sigs), - broadcast.proof.clone(), broadcast.data.clone()); - td::actor::send_closure(overlays_, &overlay::Overlays::send_broadcast_fec_ex, local_id_, overlay_id_, - local_id_.pubkey_hash(), overlay::Overlays::BroadcastFlagAnySender(), std::move(B)); -} - -void FullNodePrivateOverlay::start_up() { - std::sort(nodes_.begin(), nodes_.end()); - nodes_.erase(std::unique(nodes_.begin(), nodes_.end()), nodes_.end()); - - std::vector nodes; - for (const adnl::AdnlNodeIdShort &id : nodes_) { - nodes.push_back(id.bits256_value()); - } - auto X = create_hash_tl_object(zero_state_file_hash_, std::move(nodes)); - td::BufferSlice b{32}; - b.as_slice().copy_from(as_slice(X)); - overlay_id_full_ = overlay::OverlayIdFull{std::move(b)}; - overlay_id_ = 
overlay_id_full_.compute_short_id(); - - try_init(); -} - -void FullNodePrivateOverlay::try_init() { - // Sometimes adnl id is added to validator engine later (or not at all) - td::actor::send_closure( - adnl_, &adnl::Adnl::check_id_exists, local_id_, [SelfId = actor_id(this)](td::Result R) { - if (R.is_ok() && R.ok()) { - td::actor::send_closure(SelfId, &FullNodePrivateOverlay::init); - } else { - delay_action([SelfId]() { td::actor::send_closure(SelfId, &FullNodePrivateOverlay::try_init); }, - td::Timestamp::in(30.0)); - } - }); -} - -void FullNodePrivateOverlay::init() { - LOG(FULL_NODE_INFO) << "Creating private block overlay for adnl id " << local_id_ << " : " << nodes_.size() - << " nodes"; - class Callback : public overlay::Overlays::Callback { - public: - void receive_message(adnl::AdnlNodeIdShort src, overlay::OverlayIdShort overlay_id, td::BufferSlice data) override { - } - void receive_query(adnl::AdnlNodeIdShort src, overlay::OverlayIdShort overlay_id, td::BufferSlice data, - td::Promise promise) override { - } - void receive_broadcast(PublicKeyHash src, overlay::OverlayIdShort overlay_id, td::BufferSlice data) override { - td::actor::send_closure(node_, &FullNodePrivateOverlay::receive_broadcast, src, std::move(data)); - } - void check_broadcast(PublicKeyHash src, overlay::OverlayIdShort overlay_id, td::BufferSlice data, - td::Promise promise) override { - } - Callback(td::actor::ActorId node) : node_(node) { - } - - private: - td::actor::ActorId node_; - }; - - overlay::OverlayPrivacyRules rules{overlay::Overlays::max_fec_broadcast_size(), - overlay::CertificateFlags::AllowFec | overlay::CertificateFlags::Trusted, - {}}; - td::actor::send_closure(overlays_, &overlay::Overlays::create_private_overlay, local_id_, overlay_id_full_.clone(), - nodes_, std::make_unique(actor_id(this)), rules); - - td::actor::send_closure(rldp_, &rldp::Rldp::add_id, local_id_); - td::actor::send_closure(rldp2_, &rldp2::Rldp::add_id, local_id_); - inited_ = true; -} - -void FullNodePrivateOverlay::tear_down() { - if (inited_) { - td::actor::send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, local_id_, overlay_id_); - } -} - -} // namespace fullnode - -} // namespace validator - -} // namespace ton diff --git a/validator/full-node-private-overlay.hpp b/validator/full-node-private-overlay.hpp deleted file mode 100644 index 6463fda2..00000000 --- a/validator/full-node-private-overlay.hpp +++ /dev/null @@ -1,86 +0,0 @@ -/* - This file is part of TON Blockchain Library. - - TON Blockchain Library is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 2 of the License, or - (at your option) any later version. - - TON Blockchain Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with TON Blockchain Library. If not, see . 
-*/ -#pragma once - -#include "full-node.h" - -namespace ton { - -namespace validator { - -namespace fullnode { - -class FullNodePrivateOverlay : public td::actor::Actor { - public: - void process_broadcast(PublicKeyHash src, ton_api::tonNode_blockBroadcast &query); - void process_broadcast(PublicKeyHash src, ton_api::tonNode_newShardBlockBroadcast &query); - template - void process_broadcast(PublicKeyHash, T &) { - VLOG(FULL_NODE_WARNING) << "dropping unknown broadcast"; - } - void receive_broadcast(PublicKeyHash src, td::BufferSlice query); - - void send_shard_block_info(BlockIdExt block_id, CatchainSeqno cc_seqno, td::BufferSlice data); - void send_broadcast(BlockBroadcast broadcast); - - void start_up() override; - void tear_down() override; - - FullNodePrivateOverlay(adnl::AdnlNodeIdShort local_id, std::vector nodes, - FileHash zero_state_file_hash, FullNodeConfig config, - td::actor::ActorId keyring, td::actor::ActorId adnl, - td::actor::ActorId rldp, td::actor::ActorId rldp2, - td::actor::ActorId overlays, - td::actor::ActorId validator_manager) - : local_id_(local_id) - , nodes_(std::move(nodes)) - , zero_state_file_hash_(zero_state_file_hash) - , config_(config) - , keyring_(keyring) - , adnl_(adnl) - , rldp_(rldp) - , rldp2_(rldp2) - , overlays_(overlays) - , validator_manager_(validator_manager) { - } - - private: - adnl::AdnlNodeIdShort local_id_; - std::vector nodes_; - FileHash zero_state_file_hash_; - FullNodeConfig config_; - - td::actor::ActorId keyring_; - td::actor::ActorId adnl_; - td::actor::ActorId rldp_; - td::actor::ActorId rldp2_; - td::actor::ActorId overlays_; - td::actor::ActorId validator_manager_; - - bool inited_ = false; - overlay::OverlayIdFull overlay_id_full_; - overlay::OverlayIdShort overlay_id_; - - void try_init(); - void init(); -}; - -} // namespace fullnode - -} // namespace validator - -} // namespace ton diff --git a/validator/full-node.cpp b/validator/full-node.cpp index 5a822b26..ebba50a0 100644 --- a/validator/full-node.cpp +++ b/validator/full-node.cpp @@ -50,7 +50,6 @@ void FullNodeImpl::add_permanent_key(PublicKeyHash key, td::Promise pr for (auto &shard : shards_) { td::actor::send_closure(shard.second, &FullNodeShard::update_validators, all_validators_, sign_cert_by_); } - create_private_block_overlay(key); promise.set_value(td::Unit()); } @@ -75,7 +74,6 @@ void FullNodeImpl::del_permanent_key(PublicKeyHash key, td::Promise pr for (auto &shard : shards_) { td::actor::send_closure(shard.second, &FullNodeShard::update_validators, all_validators_, sign_cert_by_); } - private_block_overlays_.erase(key); promise.set_value(td::Unit()); } @@ -181,10 +179,6 @@ void FullNodeImpl::send_shard_block_info(BlockIdExt block_id, CatchainSeqno cc_s VLOG(FULL_NODE_WARNING) << "dropping OUT shard block info message to unknown shard"; return; } - if (!private_block_overlays_.empty()) { - td::actor::send_closure(private_block_overlays_.begin()->second, &FullNodePrivateOverlay::send_shard_block_info, - block_id, cc_seqno, data.clone()); - } td::actor::send_closure(shard, &FullNodeShard::send_shard_block_info, block_id, cc_seqno, std::move(data)); } @@ -194,10 +188,6 @@ void FullNodeImpl::send_broadcast(BlockBroadcast broadcast) { VLOG(FULL_NODE_WARNING) << "dropping OUT broadcast to unknown shard"; return; } - if (!private_block_overlays_.empty()) { - td::actor::send_closure(private_block_overlays_.begin()->second, &FullNodePrivateOverlay::send_broadcast, - broadcast.clone()); - } td::actor::send_closure(shard, &FullNodeShard::send_broadcast, 
std::move(broadcast)); } @@ -299,7 +289,6 @@ void FullNodeImpl::got_key_block_proof(td::Ref proof) { PublicKeyHash l = PublicKeyHash::zero(); std::vector keys; - std::map current_validators; for (td::int32 i = -1; i <= 1; i++) { auto r = config->get_total_validator_set(i < 0 ? i : 1 - i); if (r.not_null()) { @@ -310,18 +299,10 @@ void FullNodeImpl::got_key_block_proof(td::Ref proof) { if (local_keys_.count(key)) { l = key; } - if (i == 1) { - current_validators[key] = adnl::AdnlNodeIdShort{el.addr.is_zero() ? key.bits256_value() : el.addr}; - } } } } - if (current_validators != current_validators_) { - current_validators_ = std::move(current_validators); - update_private_block_overlays(); - } - if (keys == all_validators_) { return; } @@ -340,7 +321,6 @@ void FullNodeImpl::got_zero_block_state(td::Ref state) { PublicKeyHash l = PublicKeyHash::zero(); std::vector keys; - std::map current_validators; for (td::int32 i = -1; i <= 1; i++) { auto r = m->get_total_validator_set(i < 0 ? i : 1 - i); if (r.not_null()) { @@ -351,18 +331,10 @@ void FullNodeImpl::got_zero_block_state(td::Ref state) { if (local_keys_.count(key)) { l = key; } - if (i == 1) { - current_validators[key] = adnl::AdnlNodeIdShort{el.addr.is_zero() ? key.bits256_value() : el.addr}; - } } } } - if (current_validators != current_validators_) { - current_validators_ = std::move(current_validators); - update_private_block_overlays(); - } - if (keys == all_validators_) { return; } @@ -484,29 +456,6 @@ void FullNodeImpl::start_up() { std::make_unique(actor_id(this)), std::move(P)); } -void FullNodeImpl::update_private_block_overlays() { - private_block_overlays_.clear(); - if (local_keys_.empty()) { - return; - } - for (const auto &key : local_keys_) { - create_private_block_overlay(key); - } -} - -void FullNodeImpl::create_private_block_overlay(PublicKeyHash key) { - CHECK(local_keys_.count(key)); - if (current_validators_.count(key)) { - std::vector nodes; - for (const auto &p : current_validators_) { - nodes.push_back(p.second); - } - private_block_overlays_[key] = td::actor::create_actor( - "BlocksPrivateOverlay", current_validators_[key], std::move(nodes), zero_state_file_hash_, config_, keyring_, - adnl_, rldp_, rldp2_, overlays_, validator_manager_); - } -} - FullNodeImpl::FullNodeImpl(PublicKeyHash local_id, adnl::AdnlNodeIdShort adnl_id, FileHash zero_state_file_hash, FullNodeConfig config, td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId rldp, diff --git a/validator/full-node.hpp b/validator/full-node.hpp index 838700b5..fc2dd75c 100644 --- a/validator/full-node.hpp +++ b/validator/full-node.hpp @@ -23,7 +23,6 @@ //#include "ton-node-slave.h" #include "interfaces/proof.h" #include "interfaces/shard.h" -#include "full-node-private-overlay.hpp" #include #include @@ -112,15 +111,9 @@ class FullNodeImpl : public FullNode { PublicKeyHash sign_cert_by_; std::vector all_validators_; - std::map current_validators_; std::set local_keys_; FullNodeConfig config_; - - std::map> private_block_overlays_; - - void update_private_block_overlays(); - void create_private_block_overlay(PublicKeyHash key); }; } // namespace fullnode From be9498234881ee025ca7669b7ee7712f30aa153e Mon Sep 17 00:00:00 2001 From: Marat <98183742+dungeon-master-666@users.noreply.github.com> Date: Tue, 16 Jan 2024 10:18:54 +0100 Subject: [PATCH 57/71] [emulator] Fix emulating on account_none and set account block_lt (#815) * fix acc_deleted emulation case * set account.block_lt --- emulator/emulator-extern.cpp | 17 ++++++++++++++--- 
emulator/transaction-emulator.cpp | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/emulator/emulator-extern.cpp b/emulator/emulator-extern.cpp index 9f06964e..f8e2f724 100644 --- a/emulator/emulator-extern.cpp +++ b/emulator/emulator-extern.cpp @@ -103,6 +103,7 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, td::Ref addr_slice; auto account_slice = vm::load_cell_slice(shard_account.account); + bool account_exists = block::gen::t_Account.get_tag(account_slice) == block::gen::Account::account; if (block::gen::t_Account.get_tag(account_slice) == block::gen::Account::account_none) { if (msg_tag == block::gen::CommonMsgInfo::ext_in_msg_info) { block::gen::CommonMsgInfo::Record_ext_in_msg_info info; @@ -120,12 +121,14 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, } else { ERROR_RESPONSE(PSTRING() << "Only ext in and int message are supported"); } - } else { + } else if (block::gen::t_Account.get_tag(account_slice) == block::gen::Account::account) { block::gen::Account::Record_account account_record; if (!tlb::unpack(account_slice, account_record)) { ERROR_RESPONSE(PSTRING() << "Can't unpack account cell"); } addr_slice = std::move(account_record.addr); + } else { + ERROR_RESPONSE(PSTRING() << "Can't parse account cell"); } ton::WorkchainId wc; ton::StdSmcAddress addr; @@ -139,8 +142,16 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, now = (unsigned)std::time(nullptr); } bool is_special = wc == ton::masterchainId && emulator->get_config().is_special_smartcontract(addr); - if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), now, is_special)) { - ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); + if (account_exists) { + if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), now, is_special)) { + ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); + } + } else { + if (!account.init_new(now)) { + ERROR_RESPONSE(PSTRING() << "Can't init new account"); + } + account.last_trans_lt_ = shard_account.last_trans_lt; + account.last_trans_hash_ = shard_account.last_trans_hash; } auto result = emulator->emulate_transaction(std::move(account), message_cell, now, 0, block::transaction::Transaction::tr_ord); diff --git a/emulator/transaction-emulator.cpp b/emulator/transaction-emulator.cpp index 81cf2e9f..2e8ba037 100644 --- a/emulator/transaction-emulator.cpp +++ b/emulator/transaction-emulator.cpp @@ -42,6 +42,7 @@ td::Result> TransactionEmu if (!lt) { lt = (account.last_trans_lt_ / block::ConfigInfo::get_lt_align() + 1) * block::ConfigInfo::get_lt_align(); // next block after account_.last_trans_lt_ } + account.block_lt = lt - lt % block::ConfigInfo::get_lt_align(); compute_phase_cfg.libraries = std::make_unique(libraries_); compute_phase_cfg.ignore_chksig = ignore_chksig_; From a68b5cbe627d4b68da68f05079e0cfc75aacd59f Mon Sep 17 00:00:00 2001 From: SpyCheese Date: Tue, 16 Jan 2024 14:24:46 +0300 Subject: [PATCH 58/71] Improve validator session stats (#861) --- tl/generate/scheme/ton_api.tl | 5 +-- tl/generate/scheme/ton_api.tlo | Bin 86116 -> 86292 bytes validator-session/validator-session-types.h | 5 +++ validator-session/validator-session.cpp | 34 ++++++++++++++------ validator-session/validator-session.h | 1 + validator-session/validator-session.hpp | 4 ++- validator/manager.cpp | 11 ++++--- validator/validator-group.cpp | 16 ++++++++- 8 files changed, 58 insertions(+), 18 deletions(-) diff --git 
a/tl/generate/scheme/ton_api.tl b/tl/generate/scheme/ton_api.tl index 346d9152..21e8318f 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -742,11 +742,12 @@ http.server.config dhs:(vector http.server.dnsEntry) local_hosts:(vector http.se ---types--- -validatorSession.statsProducer id:int256 block_status:int block_timestamp:long = validatorSession.StatsProducer; +validatorSession.statsProducer id:int256 candidate_id:int256 block_status:int block_timestamp:long comment:string = validatorSession.StatsProducer; validatorSession.statsRound timestamp:long producers:(vector validatorSession.statsProducer) = validatorSession.StatsRound; -validatorSession.stats id:tonNode.blockId timestamp:long self:int256 creator:int256 total_validators:int total_weight:long +validatorSession.stats success:Bool id:tonNode.blockIdExt timestamp:long self:int256 session_id:int256 cc_seqno:int + creator:int256 total_validators:int total_weight:long signatures:int signatures_weight:long approve_signatures:int approve_signatures_weight:long first_round:int rounds:(vector validatorSession.statsRound) = validatorSession.Stats; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index 5b8e1c725e39ceff4097e4a654c796c9f84ff73e..9b74f1c3bbef41b10dba639833177fe7f6b6df74 100644 GIT binary patch delta 201 zcmaE|fOX0u)(s8@Ebn*lHBWXiFkm?$`9XJcg@M2&u+a1#CPoPfkmSu{LO%Qq3=GoG z?rX9amnJ8t78g%`U@kE^BTZ&{2P>n>^e;?|3e!968AT@NbO=q}^TDy crnj&%`b=J6%mESl!^+qJW^M0aW9;z)069x8RsaA1 diff --git a/validator-session/validator-session-types.h b/validator-session/validator-session-types.h index 57957478..bcbaa8f7 100644 --- a/validator-session/validator-session-types.h +++ b/validator-session/validator-session-types.h @@ -74,8 +74,10 @@ struct ValidatorSessionStats { struct Producer { PublicKeyHash id = PublicKeyHash::zero(); + ValidatorSessionCandidateId candidate_id = ValidatorSessionCandidateId::zero(); int block_status = status_none; td::uint64 block_timestamp = 0; + std::string comment; }; struct Round { td::uint64 timestamp = 0; @@ -85,6 +87,9 @@ struct ValidatorSessionStats { td::uint32 first_round; std::vector rounds; + bool success = false; + ValidatorSessionId session_id = ValidatorSessionId::zero(); + CatchainSeqno cc_seqno = 0; td::uint64 timestamp = 0; PublicKeyHash self = PublicKeyHash::zero(); PublicKeyHash creator = PublicKeyHash::zero(); diff --git a/validator-session/validator-session.cpp b/validator-session/validator-session.cpp index e08d8a7e..88de0fa5 100644 --- a/validator-session/validator-session.cpp +++ b/validator-session/validator-session.cpp @@ -288,7 +288,7 @@ void ValidatorSessionImpl::process_broadcast(PublicKeyHash src, td::BufferSlice CHECK(!pending_reject_.count(block_id)); CHECK(!rejected_.count(block_id)); - stats_set_candidate_status(cur_round_, src, ValidatorSessionStats::status_received); + stats_set_candidate_status(cur_round_, src, block_id, ValidatorSessionStats::status_received); auto v = virtual_state_->choose_blocks_to_approve(description(), local_idx()); for (auto &b : v) { if (b && SentBlock::get_block_id(b) == block_id) { @@ -361,7 +361,8 @@ void ValidatorSessionImpl::process_query(PublicKeyHash src, td::BufferSlice data void ValidatorSessionImpl::candidate_decision_fail(td::uint32 round, ValidatorSessionCandidateId hash, std::string result, td::uint32 src, td::BufferSlice proof) { - stats_set_candidate_status(round, description().get_source_id(src), ValidatorSessionStats::status_rejected); + stats_set_candidate_status(round, 
description().get_source_id(src), hash, ValidatorSessionStats::status_rejected, + result); if (round != cur_round_) { return; } @@ -376,7 +377,8 @@ void ValidatorSessionImpl::candidate_decision_fail(td::uint32 round, ValidatorSe void ValidatorSessionImpl::candidate_decision_ok(td::uint32 round, ValidatorSessionCandidateId hash, RootHash root_hash, FileHash file_hash, td::uint32 src, td::uint32 ok_from) { - stats_set_candidate_status(round, description().get_source_id(src), ValidatorSessionStats::status_approved); + stats_set_candidate_status(round, description().get_source_id(src), hash, ValidatorSessionStats::status_approved, + PSTRING() << "ts=" << ok_from); if (round != cur_round_) { return; } @@ -812,15 +814,13 @@ void ValidatorSessionImpl::on_new_round(td::uint32 round) { if (!have_block) { callback_->on_block_skipped(cur_round_); } else { + cur_stats_.success = true; cur_stats_.timestamp = (td::uint64)td::Clocks::system(); - cur_stats_.total_validators = description().get_total_nodes(); - cur_stats_.total_weight = description().get_total_weight(); cur_stats_.signatures = (td::uint32)export_sigs.size(); cur_stats_.signatures_weight = signatures_weight; cur_stats_.approve_signatures = (td::uint32)export_approve_sigs.size(); cur_stats_.approve_signatures_weight = approve_signatures_weight; cur_stats_.creator = description().get_source_id(block->get_src_idx()); - cur_stats_.self = description().get_source_id(local_idx()); if (it == blocks_.end()) { callback_->on_block_committed(cur_round_, description().get_source_public_key(block->get_src_idx()), @@ -923,6 +923,12 @@ void ValidatorSessionImpl::destroy() { stop(); } +void ValidatorSessionImpl::get_current_stats(td::Promise promise) { + ValidatorSessionStats stats = cur_stats_; + stats.timestamp = (td::uint64)td::Clocks::system(); + promise.set_result(std::move(stats)); +} + void ValidatorSessionImpl::start_up() { CHECK(!rldp_.empty()); cur_round_ = 0; @@ -941,6 +947,10 @@ void ValidatorSessionImpl::start_up() { void ValidatorSessionImpl::stats_init() { cur_stats_ = ValidatorSessionStats(); cur_stats_.first_round = cur_round_; + cur_stats_.session_id = unique_hash_; + cur_stats_.total_validators = description().get_total_nodes(); + cur_stats_.total_weight = description().get_total_weight(); + cur_stats_.self = description().get_source_id(local_idx()); stats_add_round(); } @@ -961,20 +971,26 @@ void ValidatorSessionImpl::stats_add_round() { } } -void ValidatorSessionImpl::stats_set_candidate_status(td::uint32 round, PublicKeyHash src, int status) { +void ValidatorSessionImpl::stats_set_candidate_status(td::uint32 round, PublicKeyHash src, + ValidatorSessionCandidateId candidate_id, int status, + std::string comment) { if (round < cur_stats_.first_round || round - cur_stats_.first_round >= cur_stats_.rounds.size()) { return; } - auto& stats_round = cur_stats_.rounds[round - cur_stats_.first_round]; + auto &stats_round = cur_stats_.rounds[round - cur_stats_.first_round]; auto it = std::find_if(stats_round.producers.begin(), stats_round.producers.end(), - [&](const ValidatorSessionStats::Producer& p) { return p.id == src; }); + [&](const ValidatorSessionStats::Producer &p) { return p.id == src; }); if (it == stats_round.producers.end()) { return; } + it->candidate_id = candidate_id; if (it->block_status == ValidatorSessionStats::status_none) { it->block_timestamp = (td::uint64)td::Clocks::system(); } it->block_status = status; + if (!comment.empty()) { + it->comment = std::move(comment); + } } td::actor::ActorOwn ValidatorSession::create( 
diff --git a/validator-session/validator-session.h b/validator-session/validator-session.h index 376cac45..3f3b7ab9 100644 --- a/validator-session/validator-session.h +++ b/validator-session/validator-session.h @@ -91,6 +91,7 @@ class ValidatorSession : public td::actor::Actor { virtual void start() = 0; virtual void destroy() = 0; + virtual void get_current_stats(td::Promise promise) = 0; static td::actor::ActorOwn create( catchain::CatChainSessionId session_id, ValidatorSessionOptions opts, PublicKeyHash local_id, diff --git a/validator-session/validator-session.hpp b/validator-session/validator-session.hpp index 1717c99f..2dcbb46c 100644 --- a/validator-session/validator-session.hpp +++ b/validator-session/validator-session.hpp @@ -160,7 +160,8 @@ class ValidatorSessionImpl : public ValidatorSession { ValidatorSessionStats cur_stats_; void stats_init(); void stats_add_round(); - void stats_set_candidate_status(td::uint32 round, PublicKeyHash src, int status); + void stats_set_candidate_status(td::uint32 round, PublicKeyHash src, ValidatorSessionCandidateId candidate_id, + int status, std::string comment = ""); public: ValidatorSessionImpl(catchain::CatChainSessionId session_id, ValidatorSessionOptions opts, PublicKeyHash local_id, @@ -173,6 +174,7 @@ class ValidatorSessionImpl : public ValidatorSession { void start() override; void destroy() override; + void get_current_stats(td::Promise promise) override; void process_blocks(std::vector blocks); void finished_processing(); diff --git a/validator/manager.cpp b/validator/manager.cpp index dcda7b6e..21fa5887 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -2640,18 +2640,19 @@ void ValidatorManagerImpl::log_validator_session_stats(BlockIdExt block_id, } std::vector> rounds; - for (const auto& round : stats.rounds) { + for (const auto &round : stats.rounds) { std::vector> producers; - for (const auto& producer : round.producers) { + for (const auto &producer : round.producers) { producers.push_back(create_tl_object( - producer.id.bits256_value(), producer.block_status, producer.block_timestamp)); + producer.id.bits256_value(), producer.candidate_id, producer.block_status, producer.block_timestamp, + producer.comment)); } rounds.push_back(create_tl_object(round.timestamp, std::move(producers))); } auto obj = create_tl_object( - create_tl_block_id_simple(block_id.id), stats.timestamp, stats.self.bits256_value(), - stats.creator.bits256_value(), stats.total_validators, stats.total_weight, stats.signatures, + stats.success, create_tl_block_id(block_id), stats.timestamp, stats.self.bits256_value(), stats.session_id, + stats.cc_seqno, stats.creator.bits256_value(), stats.total_validators, stats.total_weight, stats.signatures, stats.signatures_weight, stats.approve_signatures, stats.approve_signatures_weight, stats.first_round, std::move(rounds)); std::string s = td::json_encode(td::ToJson(*obj.get()), false); diff --git a/validator/validator-group.cpp b/validator/validator-group.cpp index 7baf0f05..51217bf9 100644 --- a/validator/validator-group.cpp +++ b/validator/validator-group.cpp @@ -136,7 +136,8 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s std::vector approve_signatures, validatorsession::ValidatorSessionStats stats, td::Promise promise) { - if (round_id >= last_known_round_id_) { + stats.cc_seqno = validator_set_->get_catchain_seqno(); + if (round_id >= last_known_round_id_) { last_known_round_id_ = round_id + 1; } auto sig_set = create_signature_set(std::move(signatures)); @@ -354,6 
+355,19 @@ void ValidatorGroup::start(std::vector prev, BlockIdExt min_masterch void ValidatorGroup::destroy() { if (!session_.empty()) { + td::actor::send_closure(session_, &validatorsession::ValidatorSession::get_current_stats, + [manager = manager_, cc_seqno = validator_set_->get_catchain_seqno(), + block_id = create_next_block_id(RootHash::zero(), FileHash::zero())]( + td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get validator session stats: " << R.move_as_error(); + return; + } + auto stats = R.move_as_ok(); + stats.cc_seqno = cc_seqno; + td::actor::send_closure(manager, &ValidatorManager::log_validator_session_stats, block_id, + std::move(stats)); + }); auto ses = session_.release(); delay_action([ses]() mutable { td::actor::send_closure(ses, &validatorsession::ValidatorSession::destroy); }, td::Timestamp::in(10.0)); From 6f277b40bfaa4b429ab23c4c9576bebb1f18bef2 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 17 Jan 2024 09:57:31 +0300 Subject: [PATCH 59/71] Add changelog --- Changelog.md | 12 ++++++++++++ recent_changelog.md | 17 +++++++++-------- 2 files changed, 21 insertions(+), 8 deletions(-) diff --git a/Changelog.md b/Changelog.md index 3310d6a2..9fec686d 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,3 +1,15 @@ +## 2024.01 Update + +1. Fixes in how gas in transactions on special accounts is accounted in block limit. Previously, gas was counted as usual, so to conduct elections that costs >30m gas block limit in masterchain was set to 37m gas. To lower the limit for safety reasons it is proposed to not count gas on special accounts. Besides `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. New behavior is activated through setting `gas_prices_v3` in `ConfigParam 20;`. + * Besides update of config temporally increases gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` to `special_gas_limit`, see [details](https://t.me/tonstatus/88). +2. Improvements in LS behavior + * Improved detection of the state with all shards applied to decrease rate of `Block is not applied` error + * Better error logs: `block not in db` and `block is not applied` separation + * Fix error in proof generation for blocks after merge +3. Improvements in DHT work and storage, CellDb, config.json ammendment, peer misbehavior detection, validator session stats collection, emulator. + +Besides the work of the core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection). + ## 2023.12 Update 1. Optimized message queue handling, now queue cleaning speed doesn't depend on total queue size diff --git a/recent_changelog.md b/recent_changelog.md index 5e122454..637f416c 100644 --- a/recent_changelog.md +++ b/recent_changelog.md @@ -1,10 +1,11 @@ -## 2023.12 Update +## 2024.01 Update -1. Optimized message queue handling, now queue cleaning speed doesn't depend on total queue size - * Cleaning delivered messages using lt augmentation instead of random search / consequtive walk - * Keeping root cell of queue message in memory until outdated (caching) -2. Changes to block collation/validation limits -3. Stop accepting new external message if message queue is overloaded -4. Introducing conditions for shard split/merge based on queue size +1. Fixes in how gas in transactions on special accounts is accounted in block limit. Previously, gas was counted as usual, so to conduct elections that costs >30m gas block limit in masterchain was set to 37m gas. 
To lower the limit for safety reasons it is proposed to not count gas on special accounts. Besides `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. New behavior is activated through setting `gas_prices_v3` in `ConfigParam 20;`. + * Besides update of config temporally increases gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` to `special_gas_limit`, see [details](https://t.me/tonstatus/88). +2. Improvements in LS behavior + * Improved detection of the state with all shards applied to decrease rate of `Block is not applied` error + * Better error logs: `block not in db` and `block is not applied` separation + * Fix error in proof generation for blocks after merge +3. Improvements in DHT work and storage, CellDb, config.json ammendment, peer misbehavior detection, validator session stats collection, emulator. -Read [more](https://blog.ton.org/technical-report-december-5-inscriptions-launch-on-ton) on that update. +Besides the work of the core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection). From b1f2160510a5afd2c29c8a67cfc351013ff1028a Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 17 Jan 2024 12:01:34 +0300 Subject: [PATCH 60/71] Fix setting gas limits in transaction.cpp (#864) Co-authored-by: SpyCheese --- crypto/block/transaction.cpp | 11 ++++++----- validator/impl/external-message.cpp | 5 +++-- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index b4bd648b..ae50a710 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1220,7 +1220,6 @@ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& } else { cp.gas_max = gas_bought_for(cfg, balance.grams); } - cp.gas_credit = 0; if (trans_type != tr_ord || (account.is_special && cfg.special_gas_full)) { // may use all gas that can be bought using remaining balance cp.gas_limit = cp.gas_max; @@ -1228,10 +1227,12 @@ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& // originally use only gas bought using remaining message balance // if the message is "accepted" by the smart contract, the gas limit will be set to gas_max cp.gas_limit = std::min(gas_bought_for(cfg, msg_balance_remaining.grams), cp.gas_max); - if (!block::tlb::t_Message.is_internal(in_msg)) { - // external messages carry no balance, give them some credit to check whether they are accepted - cp.gas_credit = std::min(cfg.gas_credit, cp.gas_max); - } + } + if (trans_type == tr_ord && !block::tlb::t_Message.is_internal(in_msg)) { + // external messages carry no balance, give them some credit to check whether they are accepted + cp.gas_credit = std::min(cfg.gas_credit, cp.gas_max); + } else { + cp.gas_credit = 0; } LOG(DEBUG) << "gas limits: max=" << cp.gas_max << ", limit=" << cp.gas_limit << ", credit=" << cp.gas_credit; return true; diff --git a/validator/impl/external-message.cpp b/validator/impl/external-message.cpp index 5c1848aa..e7665afc 100644 --- a/validator/impl/external-message.cpp +++ b/validator/impl/external-message.cpp @@ -102,7 +102,7 @@ void ExtMessageQ::run_message(td::BufferSlice data, block::SizeLimitsConfig::Ext run_fetch_account_state( wc, addr, manager, - [promise = std::move(promise), msg_root = root, wc, + [promise = std::move(promise), msg_root = root, wc, addr, M](td::Result, UnixTime, LogicalTime, std::unique_ptr>> res) mutable { if (res.is_error()) { @@ -114,7 +114,8 @@ void 
ExtMessageQ::run_message(td::BufferSlice data, block::SizeLimitsConfig::Ext auto utime = std::get<1>(tuple); auto lt = std::get<2>(tuple); auto config = std::move(std::get<3>(tuple)); - if (!acc.unpack(shard_acc, utime, false)) { + bool special = wc == masterchainId && config->is_special_smartcontract(addr); + if (!acc.unpack(shard_acc, utime, special)) { promise.set_error(td::Status::Error(PSLICE() << "Failed to unpack account state")); } else { auto status = run_message_on_account(wc, &acc, utime, lt + 1, msg_root, std::move(config)); From 128a85bee568e84146f1e985a92ea85011d1e380 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Sun, 21 Jan 2024 12:59:59 +0300 Subject: [PATCH 61/71] Use Config 8 to activate new gas limit behavior instead of new GasLimitsPrices constructor (#867) * Remove gas_prices_v3, enable new gas limits by GlobalVersion = 5 * Change final date for higher gas limit --------- Co-authored-by: SpyCheese --- common/global-version.h | 2 +- crypto/block/block.tlb | 5 ----- crypto/block/mc-config.cpp | 4 ---- crypto/block/mc-config.h | 1 - crypto/block/transaction.cpp | 13 +++++-------- doc/GlobalVersions.md | 12 +++++++++++- validator/impl/validate-query.cpp | 1 + 7 files changed, 18 insertions(+), 20 deletions(-) diff --git a/common/global-version.h b/common/global-version.h index 01c1795d..9b23d46d 100644 --- a/common/global-version.h +++ b/common/global-version.h @@ -19,6 +19,6 @@ namespace ton { // See doc/GlobalVersions.md -const int SUPPORTED_VERSION = 4; +const int SUPPORTED_VERSION = 5; } diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index eb1f8b94..4b36f13b 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -696,11 +696,6 @@ gas_prices_ext#de gas_price:uint64 gas_limit:uint64 special_gas_limit:uint64 gas block_gas_limit:uint64 freeze_due_limit:uint64 delete_due_limit:uint64 = GasLimitsPrices; -// same fields as gas_prices_ext; behavior differs -gas_prices_v3#df gas_price:uint64 gas_limit:uint64 special_gas_limit:uint64 gas_credit:uint64 - block_gas_limit:uint64 freeze_due_limit:uint64 delete_due_limit:uint64 - = GasLimitsPrices; - gas_flat_pfx#d1 flat_gas_limit:uint64 flat_gas_price:uint64 other:GasLimitsPrices = GasLimitsPrices; diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 51402142..0cef7322 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -654,13 +654,9 @@ td::Result Config::do_get_gas_limits_prices(td::Ref c res.delete_due_limit = r.delete_due_limit; }; block::gen::GasLimitsPrices::Record_gas_prices_ext rec; - block::gen::GasLimitsPrices::Record_gas_prices_v3 rec_v3; vm::CellSlice cs0 = cs; if (tlb::unpack(cs, rec)) { f(rec, rec.special_gas_limit); - } else if (tlb::unpack(cs = cs0, rec_v3)) { - f(rec_v3, rec_v3.special_gas_limit); - res.special_full_limit = true; } else { block::gen::GasLimitsPrices::Record_gas_prices rec0; if (tlb::unpack(cs = cs0, rec0)) { diff --git a/crypto/block/mc-config.h b/crypto/block/mc-config.h index dcc48b4b..caab93f3 100644 --- a/crypto/block/mc-config.h +++ b/crypto/block/mc-config.h @@ -349,7 +349,6 @@ struct GasLimitsPrices { td::uint64 block_gas_limit{0}; td::uint64 freeze_due_limit{0}; td::uint64 delete_due_limit{0}; - bool special_full_limit{false}; td::RefInt256 compute_gas_price(td::uint64 gas_used) const; }; diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index ae50a710..ec93e11c 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1039,12 +1039,8 @@ bool 
ComputePhaseConfig::parse_GasLimitsPrices_internal(Ref cs, t delete_due_limit = td::make_refint(r.delete_due_limit); }; block::gen::GasLimitsPrices::Record_gas_prices_ext rec; - block::gen::GasLimitsPrices::Record_gas_prices_v3 rec_v3; if (tlb::csr_unpack(cs, rec)) { f(rec, rec.special_gas_limit); - } else if (tlb::csr_unpack(cs, rec_v3)) { - f(rec_v3, rec_v3.special_gas_limit); - special_gas_full = true; } else { block::gen::GasLimitsPrices::Record_gas_prices rec0; if (tlb::csr_unpack(std::move(cs), rec0)) { @@ -1153,8 +1149,8 @@ namespace transaction { * not enough to clean up old queires, thus locking funds inside. * See comment in crypto/smartcont/highload-wallet-v2-code.fc for details on why this happened. * Account address: EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu - * It was proposed to validators to increase gas limit for this account for a limited amount of time (until 2024-02-16). - * It is activated by setting gas_prices_v3 in ConfigParam 20 (config_mc_gas_prices). + * It was proposed to validators to increase gas limit for this account for a limited amount of time (until 2024-02-29). + * It is activated by setting global version to 5 in ConfigParam 8. * This config change also activates new behavior for special accounts in masterchain. * * @param cfg The compute phase configuration. @@ -1164,10 +1160,10 @@ namespace transaction { * @returns True if gas_limit override is required, false otherwise */ static bool override_gas_limit(const ComputePhaseConfig& cfg, ton::UnixTime now, const Account& account) { - if (!cfg.mc_gas_prices.special_full_limit) { + if (!cfg.special_gas_full) { return false; } - ton::UnixTime until = 1708041600; // 2024-02-16 00:00:00 UTC + ton::UnixTime until = 1709164800; // 2024-02-29 00:00:00 UTC ton::WorkchainId wc = 0; const char* addr_hex = "FFBFD8F5AE5B2E1C7C3614885CB02145483DFAEE575F0DD08A72C366369211CD"; return now < until && account.workchain == wc && account.addr.to_hex() == addr_hex; @@ -3533,6 +3529,7 @@ td::Status FetchConfigParams::fetch_config_params( TRY_RESULT_PREFIX(mc_gas_prices, config.get_gas_limits_prices(true), "cannot unpack masterchain gas prices and limits: "); compute_phase_cfg->mc_gas_prices = std::move(mc_gas_prices); + compute_phase_cfg->special_gas_full = config.get_global_version() >= 5; storage_phase_cfg->enable_due_payment = config.get_global_version() >= 4; compute_phase_cfg->block_rand_seed = *rand_seed; compute_phase_cfg->max_vm_data_depth = size_limits.max_vm_data_depth; diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index ccfca940..7d9729f8 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -36,4 +36,14 @@ intermediate value before division (e.g. `(xy+w)/z`). * Flag +16 in actions "Send message", "Reserve", "Change library" causes bounce if action fails. ### Storage phase -* Unpaid storage fee is now saved to `due_payment` \ No newline at end of file +* Unpaid storage fee is now saved to `due_payment` + +## Version 5 +Version 5 enables higher gas limits for special contracts. + +* Gas limit for all transactions on special contracts is set to `special_gas_limit` from `ConfigParam 20` (which is 35M at the moment of writing). +Previously only ticktock transactions had this limit, while ordinary transactions could use up to `gas_limit` gas (1M). +* Gas usage of special contracts is not taken into account when checking block limits. This allows keeping masterchain block limits low +while having high gas limits for elector. 
+* Gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` is increased to `special_gas_limit * 2` until 2024-02-29. +See [this post](https://t.me/tonstatus/88) for details. \ No newline at end of file diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 94eded6c..f5dc28e2 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -945,6 +945,7 @@ bool ValidateQuery::fetch_config_params() { return fatal_error(mc_gas_prices.move_as_error_prefix("cannot unpack masterchain gas prices and limits: ")); } compute_phase_cfg_.mc_gas_prices = mc_gas_prices.move_as_ok(); + compute_phase_cfg_.special_gas_full = config_->get_global_version() >= 5; storage_phase_cfg_.enable_due_payment = config_->get_global_version() >= 4; compute_phase_cfg_.block_rand_seed = rand_seed_; compute_phase_cfg_.libraries = std::make_unique(config_->get_libraries_root(), 256); From 42d4c051efa4f4b4c1337dfe4681e080c1267cf2 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 22 Jan 2024 12:34:49 +0300 Subject: [PATCH 62/71] Disallow recursive library cells (#868) * Disallow recursive library cells * Stop emulation of external messages on accept --------- Co-authored-by: SpyCheese --- crypto/block/transaction.cpp | 1 + crypto/block/transaction.h | 1 + crypto/vm/cellops.cpp | 31 +++++++++++++++++++++++++++++ crypto/vm/cells/CellSlice.cpp | 5 +++++ crypto/vm/tonops.cpp | 4 ++++ crypto/vm/vm.h | 7 +++++++ validator/impl/external-message.cpp | 1 + 7 files changed, 50 insertions(+) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index ec93e11c..e43b9305 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1559,6 +1559,7 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { vm.set_global_version(cfg.global_version); vm.set_c7(prepare_vm_c7(cfg)); // tuple with SmartContractInfo vm.set_chksig_always_succeed(cfg.ignore_chksig); + vm.set_stop_on_accept_message(cfg.stop_on_accept_message); // vm.incr_stack_trace(1); // enable stack dump after each step LOG(DEBUG) << "starting VM"; diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 7539efe0..1ed2dfd3 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -120,6 +120,7 @@ struct ComputePhaseConfig { std::unique_ptr suspended_addresses; SizeLimitsConfig size_limits; int vm_log_verbosity = 0; + bool stop_on_accept_message = false; ComputePhaseConfig() : gas_price(0), gas_limit(0), special_gas_limit(0), gas_credit(0) { compute_threshold(); diff --git a/crypto/vm/cellops.cpp b/crypto/vm/cellops.cpp index 9e10e072..9b702fcd 100644 --- a/crypto/vm/cellops.cpp +++ b/crypto/vm/cellops.cpp @@ -892,6 +892,37 @@ int exec_load_special_cell(VmState* st, bool quiet) { Stack& stack = st->get_stack(); VM_LOG(st) << "execute XLOAD" << (quiet ? 
"Q" : ""); auto cell = stack.pop_cell(); + auto r_loaded_cell = cell->load_cell(); + if (r_loaded_cell.is_error()) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "failed to load cell"}; + } + } + auto loaded_cell = r_loaded_cell.move_as_ok(); + if (loaded_cell.data_cell->is_special()) { + if (loaded_cell.data_cell->special_type() != CellTraits::SpecialType::Library) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "unexpected special cell"}; + } + } + CellSlice cs(std::move(loaded_cell)); + DCHECK(cs.size() == Cell::hash_bits + 8); + cell = st->load_library(cs.data_bits() + 8); + if (cell.is_null()) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "failed to load library cell"}; + } + } + } stack.push_cell(cell); if (quiet) { stack.push_bool(true); diff --git a/crypto/vm/cells/CellSlice.cpp b/crypto/vm/cells/CellSlice.cpp index e1df5759..cbfc9e50 100644 --- a/crypto/vm/cells/CellSlice.cpp +++ b/crypto/vm/cells/CellSlice.cpp @@ -1056,6 +1056,7 @@ std::ostream& operator<<(std::ostream& os, Ref cs_ref) { // If can_be_special is not null, then it is allowed to load special cell // Flag whether loaded cell is actually special will be stored into can_be_special VirtualCell::LoadedCell load_cell_slice_impl(Ref cell, bool* can_be_special) { + bool library_loaded = false; while (true) { auto* vm_state_interface = VmStateInterface::get(); if (vm_state_interface) { @@ -1076,6 +1077,10 @@ VirtualCell::LoadedCell load_cell_slice_impl(Ref cell, bool* can_be_specia *can_be_special = loaded_cell.data_cell->is_special(); } else if (loaded_cell.data_cell->is_special()) { if (loaded_cell.data_cell->special_type() == DataCell::SpecialType::Library) { + if (library_loaded) { + throw VmError{Excno::cell_und, "failed to load library cell: recursive library cells are not allowed"}; + } + library_loaded = true; if (vm_state_interface) { CellSlice cs(std::move(loaded_cell)); DCHECK(cs.size() == Cell::hash_bits + 8); diff --git a/crypto/vm/tonops.cpp b/crypto/vm/tonops.cpp index d150f30b..dce61797 100644 --- a/crypto/vm/tonops.cpp +++ b/crypto/vm/tonops.cpp @@ -67,6 +67,10 @@ int exec_set_gas_generic(VmState* st, long long new_gas_limit) { throw VmNoGas{}; } st->change_gas_limit(new_gas_limit); + if (st->get_stop_on_accept_message()) { + VM_LOG(st) << "External message is accepted, stopping TVM"; + return st->jump(td::Ref{true, 0}); + } return 0; } diff --git a/crypto/vm/vm.h b/crypto/vm/vm.h index 2066db4c..fd18f92d 100644 --- a/crypto/vm/vm.h +++ b/crypto/vm/vm.h @@ -98,6 +98,7 @@ class VmState final : public VmStateInterface { td::HashSet loaded_cells; int stack_trace{0}, debug_off{0}; bool chksig_always_succeed{false}; + bool stop_on_accept_message{false}; td::optional missing_library; td::uint16 max_data_depth = 512; // Default value int global_version{0}; @@ -381,6 +382,12 @@ class VmState final : public VmStateInterface { bool get_chksig_always_succeed() const { return chksig_always_succeed; } + void set_stop_on_accept_message(bool flag) { + stop_on_accept_message = flag; + } + bool get_stop_on_accept_message() const { + return stop_on_accept_message; + } Ref ref_to_cont(Ref cell) const { return td::make_ref(load_cell_slice_ref(std::move(cell)), get_cp()); } diff --git a/validator/impl/external-message.cpp b/validator/impl/external-message.cpp index e7665afc..073e7360 100644 --- a/validator/impl/external-message.cpp +++ b/validator/impl/external-message.cpp @@ 
-156,6 +156,7 @@ td::Status ExtMessageQ::run_message_on_account(ton::WorkchainId wc, } compute_phase_cfg_.libraries = std::make_unique(config->get_libraries_root(), 256); compute_phase_cfg_.with_vm_log = true; + compute_phase_cfg_.stop_on_accept_message = true; auto res = Collator::impl_create_ordinary_transaction(msg_root, acc, utime, lt, &storage_phase_cfg_, &compute_phase_cfg_, From 20f9271b72b5addf705c6f13f54472b2d9b2306c Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 22 Jan 2024 18:32:21 +0300 Subject: [PATCH 63/71] Fix test-validator-session-state (#869) Co-authored-by: SpyCheese --- test/test-validator-session-state.cpp | 30 ++++--------------- .../validator-session-description.hpp | 3 ++ 2 files changed, 9 insertions(+), 24 deletions(-) diff --git a/test/test-validator-session-state.cpp b/test/test-validator-session-state.cpp index 5ed08add..cb5d817f 100644 --- a/test/test-validator-session-state.cpp +++ b/test/test-validator-session-state.cpp @@ -34,6 +34,7 @@ #include "validator-session/validator-session-description.h" #include "validator-session/validator-session-state.h" +#include "validator-session/validator-session-description.hpp" #include #include @@ -48,17 +49,13 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { return 0; } void *alloc(size_t size, size_t align, bool temp) override { - size = (size + 15) / 16 * 16; - td::uint32 idx = temp ? 1 : 0; - auto s = pdata_cur_[idx].fetch_add(size); - CHECK(s + size <= pdata_size_[idx]); - return static_cast(pdata_[idx] + s); + return (temp ? mem_temp_ : mem_perm_).alloc(size, align); } bool is_persistent(const void *ptr) const override { - return ptr == nullptr || (ptr >= pdata_[0] && ptr < pdata_[0] + pdata_size_[0]); + return mem_perm_.contains(ptr); } void clear_temp_memory() override { - pdata_cur_[1] = 0; + mem_temp_.clear(); } ton::PublicKeyHash get_source_id(td::uint32 idx) const override { @@ -189,21 +186,8 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { return opts_; } - ~Description() { - delete[] pdata_[0]; - delete[] pdata_[1]; - } - Description(ton::validatorsession::ValidatorSessionOptions opts, td::uint32 total_nodes) - : opts_(opts), total_nodes_(total_nodes) { - pdata_size_[0] = - static_cast(std::numeric_limits::max() < (1ull << 32) ? 
1ull << 30 : 1ull << 33); - pdata_size_[1] = 1 << 22; - pdata_[0] = new td::uint8[pdata_size_[0]]; - pdata_[1] = new td::uint8[pdata_size_[1]]; - pdata_cur_[0] = 0; - pdata_cur_[1] = 0; - + : opts_(opts), total_nodes_(total_nodes), mem_perm_(1 << 30), mem_temp_(1 << 22) { for (auto &el : cache_) { Cached v{nullptr}; el.store(v, std::memory_order_relaxed); @@ -224,9 +208,7 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { }; std::array, cache_size> cache_; - td::uint8 *pdata_[2]; - std::atomic pdata_cur_[2]; - size_t pdata_size_[2]; + ton::validatorsession::ValidatorSessionDescriptionImpl::MemPool mem_perm_, mem_temp_; }; double myrand() { diff --git a/validator-session/validator-session-description.hpp b/validator-session/validator-session-description.hpp index 0c662b7a..5e09c694 100644 --- a/validator-session/validator-session-description.hpp +++ b/validator-session/validator-session-description.hpp @@ -58,6 +58,7 @@ class ValidatorSessionDescriptionImpl : public ValidatorSessionDescription { }; std::array, cache_size> cache_; + public: class MemPool { public: explicit MemPool(size_t chunk_size); @@ -71,6 +72,8 @@ class ValidatorSessionDescriptionImpl : public ValidatorSessionDescription { std::vector data_; size_t ptr_ = 0; }; + + private: MemPool mem_perm_ = MemPool(mem_chunk_size_perm); MemPool mem_temp_ = MemPool(mem_chunk_size_temp); From d91643face365912224a38c0416a3363e27d6906 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 22 Jan 2024 18:33:26 +0300 Subject: [PATCH 64/71] Fix getting shard client block id (#870) Co-authored-by: SpyCheese --- validator/manager.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/validator/manager.cpp b/validator/manager.cpp index 21fa5887..af01ff8a 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -2460,8 +2460,8 @@ void ValidatorManagerImpl::update_shard_client_state(BlockIdExt masterchain_bloc } void ValidatorManagerImpl::get_shard_client_state(bool from_db, td::Promise promise) { - if (!shard_client_.empty() && !from_db) { - td::actor::send_closure(shard_client_, &ShardClient::get_processed_masterchain_block_id, std::move(promise)); + if (shard_client_handle_ && !from_db) { + promise.set_result(shard_client_handle_->id()); } else { td::actor::send_closure(db_, &Db::get_shard_client_state, std::move(promise)); } From 2e231ec2ff74b950dabccdf490d9fcf8ea97ea27 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 22 Jan 2024 21:56:11 +0300 Subject: [PATCH 65/71] Count gas usage for ordinar transactions on special accounts in separate counter (#872) * Improve checking total gas usage in collator and validator --------- Co-authored-by: SpyCheese --- validator/impl/collator-impl.h | 2 +- validator/impl/collator.cpp | 10 +++++----- validator/impl/validate-query.cpp | 28 +++++++++++++++++++++------- validator/impl/validate-query.hpp | 1 + 4 files changed, 28 insertions(+), 13 deletions(-) diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index fe931cd6..c6ba27b4 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -244,7 +244,7 @@ class Collator final : public td::actor::Actor { Ref& in_msg); bool create_ticktock_transactions(int mask); bool create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask); - Ref create_ordinary_transaction(Ref msg_root); + Ref create_ordinary_transaction(Ref msg_root, bool is_special_tx = false); bool check_cur_validator_set(); bool 
unpack_last_mc_state(); bool unpack_last_state(); diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp index 1b7991f6..413c5be9 100644 --- a/validator/impl/collator.cpp +++ b/validator/impl/collator.cpp @@ -2667,8 +2667,7 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t return fatal_error(td::Status::Error( -666, std::string{"cannot serialize new transaction for smart contract "} + smc_addr.to_hex())); } - if (!trans->update_limits(*block_limit_status_, - /* with_gas = */ !(acc->is_special && compute_phase_cfg_.special_gas_full))) { + if (!trans->update_limits(*block_limit_status_, /* with_gas = */ false)) { return fatal_error(-666, "cannot update block limit status to include the new transaction"); } if (trans->commit(*acc).is_null()) { @@ -2684,10 +2683,11 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t * Creates an ordinary transaction using a given message. * * @param msg_root The root of the message to be processed serialized using Message TLB-scheme. + * @param is_special_tx True if creating a special transaction (mint/recover), false otherwise. * * @returns The root of the serialized transaction, or an empty reference if the transaction creation fails. */ -Ref Collator::create_ordinary_transaction(Ref msg_root) { +Ref Collator::create_ordinary_transaction(Ref msg_root, bool is_special_tx) { ton::StdSmcAddress addr; auto cs = vm::load_cell_slice(msg_root); bool external; @@ -2746,7 +2746,7 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { std::unique_ptr trans = res.move_as_ok(); if (!trans->update_limits(*block_limit_status_, - /* with_gas = */ !(acc->is_special && compute_phase_cfg_.special_gas_full))) { + /* with_gas = */ !(is_special_tx && compute_phase_cfg_.special_gas_full))) { fatal_error("cannot update block limit status to include the new transaction"); return {}; } @@ -3019,7 +3019,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R return -1; } // 1. 
create a Transaction processing this Message - auto trans_root = create_ordinary_transaction(msg.msg); + auto trans_root = create_ordinary_transaction(msg.msg, is_special != nullptr); if (trans_root.is_null()) { fatal_error("cannot create transaction for re-processing output message"); return -1; diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index f5dc28e2..2b3ccd85 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -4685,6 +4685,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT bool external{false}, ihr_delivered{false}, need_credit_phase{false}; // check input message block::CurrencyCollection money_imported(0), money_exported(0); + bool is_special_tx = false; // recover/mint transaction if (in_msg_root.not_null()) { auto in_descr_cs = in_msg_dict_->lookup(in_msg_root->get_hash().as_bitslice()); if (in_descr_cs.is_null()) { @@ -4700,6 +4701,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " has an invalid InMsg record (not one of msg_import_ext, msg_import_fin, " "msg_import_imm or msg_import_ihr)"); } + is_special_tx = is_special_in_msg(*in_descr_cs); // once we know there is a InMsg with correct hash, we already know that it contains a message with this hash (by the verification of InMsg), so it is our message // have still to check its destination address and imported value // and that it refers to this transaction @@ -4717,7 +4719,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " processed inbound message created later at logical time " << info.created_lt); } - if (info.created_lt != start_lt_ || !is_special_in_msg(*in_descr_cs)) { + if (info.created_lt != start_lt_ || !is_special_tx) { msg_proc_lt_.emplace_back(addr, lt, info.created_lt); } dest = std::move(info.dest); @@ -5057,19 +5059,31 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT return reject_query(PSTRING() << "cannot re-create the serialization of transaction " << lt << " for smart contract " << addr.to_hex()); } - if (!trs->update_limits(*block_limit_status_, - /* with_gas = */ !account.is_special && !trs->gas_limit_overridden, - /* with_size = */ false)) { + if (!trs->update_limits(*block_limit_status_, /* with_gas = */ false, /* with_size = */ false)) { return fatal_error(PSTRING() << "cannot update block limit status to include transaction " << lt << " of account " << addr.to_hex()); } - if (block_limit_status_->gas_used > block_limits_->gas.hard() + compute_phase_cfg_.gas_limit) { - // Note that block_limit_status_->gas_used does not include transactions in special accounts + + // Collator should stop if total gas usage exceeds limits, including transactions on special accounts, but without + // ticktocks and mint/recover. + // Here Validator checks a weaker condition + if (!is_special_tx && !trs->gas_limit_overridden && trans_type == block::transaction::Transaction::tr_ord) { + (account.is_special ? 
total_special_gas_used_ : total_gas_used_) += trs->gas_used(); + } + if (total_gas_used_ > block_limits_->gas.hard() + compute_phase_cfg_.gas_limit) { return reject_query(PSTRING() << "gas block limits are exceeded: total_gas_used > gas_limit_hard + trx_gas_limit (" - << "total_gas_used=" << block_limit_status_->gas_used + << "total_gas_used=" << total_gas_used_ << ", gas_limit_hard=" << block_limits_->gas.hard() << ", trx_gas_limit=" << compute_phase_cfg_.gas_limit << ")"); } + if (total_special_gas_used_ > block_limits_->gas.hard() + compute_phase_cfg_.special_gas_limit) { + return reject_query( + PSTRING() << "gas block limits are exceeded: total_special_gas_used > gas_limit_hard + special_gas_limit (" + << "total_special_gas_used=" << total_special_gas_used_ + << ", gas_limit_hard=" << block_limits_->gas.hard() + << ", special_gas_limit=" << compute_phase_cfg_.special_gas_limit << ")"); + } + auto trans_root2 = trs->commit(account); if (trans_root2.is_null()) { return reject_query(PSTRING() << "the re-created transaction " << lt << " for smart contract " << addr.to_hex() diff --git a/validator/impl/validate-query.hpp b/validator/impl/validate-query.hpp index ff8cc83c..8829ac61 100644 --- a/validator/impl/validate-query.hpp +++ b/validator/impl/validate-query.hpp @@ -195,6 +195,7 @@ class ValidateQuery : public td::actor::Actor { ton::LogicalTime prev_key_block_lt_; std::unique_ptr block_limits_; std::unique_ptr block_limit_status_; + td::uint64 total_gas_used_{0}, total_special_gas_used_{0}; LogicalTime start_lt_, end_lt_; UnixTime prev_now_{~0u}, now_{~0u}; From 9f1b370f2cc772d93100611c9d6e45e6623a575d Mon Sep 17 00:00:00 2001 From: neodix42 Date: Wed, 24 Jan 2024 09:09:10 +0100 Subject: [PATCH 66/71] Make execution of tests in Nix builds optional (#873) --- .github/workflows/ton-x86-64-linux.yml | 4 ++-- .github/workflows/ton-x86-64-macos.yml | 4 ++-- assembly/cicd/jenkins/test-builds.groovy | 10 +++++----- assembly/nix/build-linux-arm64-nix.sh | 19 +++++++++++++++++-- assembly/nix/build-linux-x86-64-nix.sh | 20 ++++++++++++++++++-- assembly/nix/build-macos-nix.sh | 20 ++++++++++++++++++-- assembly/nix/linux-arm64-static.nix | 3 ++- assembly/nix/linux-x86-64-static.nix | 3 ++- assembly/nix/macos-static.nix | 5 +++-- 9 files changed, 69 insertions(+), 19 deletions(-) diff --git a/.github/workflows/ton-x86-64-linux.yml b/.github/workflows/ton-x86-64-linux.yml index fdd91000..abbe1cca 100644 --- a/.github/workflows/ton-x86-64-linux.yml +++ b/.github/workflows/ton-x86-64-linux.yml @@ -24,7 +24,7 @@ jobs: run: | cp assembly/nix/build-linux-x86-64-nix.sh . chmod +x build-linux-x86-64-nix.sh - ./build-linux-x86-64-nix.sh + ./build-linux-x86-64-nix.sh -t - name: Simple binaries test run: | @@ -38,4 +38,4 @@ jobs: uses: actions/upload-artifact@master with: name: ton-x86_64-linux-binaries - path: artifacts \ No newline at end of file + path: artifacts diff --git a/.github/workflows/ton-x86-64-macos.yml b/.github/workflows/ton-x86-64-macos.yml index c0f90718..8c71f34a 100644 --- a/.github/workflows/ton-x86-64-macos.yml +++ b/.github/workflows/ton-x86-64-macos.yml @@ -20,7 +20,7 @@ jobs: run: | cp assembly/nix/build-macos-nix.sh . 
chmod +x build-macos-nix.sh - ./build-macos-nix.sh + ./build-macos-nix.sh -t - name: Simple binaries test run: | @@ -34,4 +34,4 @@ jobs: uses: actions/upload-artifact@master with: name: ton-x86_64-macos-binaries - path: artifacts \ No newline at end of file + path: artifacts diff --git a/assembly/cicd/jenkins/test-builds.groovy b/assembly/cicd/jenkins/test-builds.groovy index 380efedd..960ac8db 100644 --- a/assembly/cicd/jenkins/test-builds.groovy +++ b/assembly/cicd/jenkins/test-builds.groovy @@ -31,7 +31,7 @@ pipeline { sh ''' cp assembly/nix/build-linux-x86-64-nix.sh . chmod +x build-linux-x86-64-nix.sh - ./build-linux-x86-64-nix.sh + ./build-linux-x86-64-nix.sh -t ''' sh ''' cd artifacts @@ -69,7 +69,7 @@ pipeline { sh ''' cp assembly/nix/build-linux-arm64-nix.sh . chmod +x build-linux-arm64-nix.sh - ./build-linux-arm64-nix.sh + ./build-linux-arm64-nix.sh -t ''' sh ''' cd artifacts @@ -107,7 +107,7 @@ pipeline { sh ''' cp assembly/nix/build-macos-nix.sh . chmod +x build-macos-nix.sh - ./build-macos-nix.sh + ./build-macos-nix.sh -t ''' sh ''' cd artifacts @@ -145,7 +145,7 @@ pipeline { sh ''' cp assembly/nix/build-macos-nix.sh . chmod +x build-macos-nix.sh - ./build-macos-nix.sh + ./build-macos-nix.sh -t ''' sh ''' cd artifacts @@ -233,4 +233,4 @@ pipeline { } } } -} \ No newline at end of file +} diff --git a/assembly/nix/build-linux-arm64-nix.sh b/assembly/nix/build-linux-arm64-nix.sh index bb859141..08817e72 100644 --- a/assembly/nix/build-linux-arm64-nix.sh +++ b/assembly/nix/build-linux-arm64-nix.sh @@ -3,12 +3,27 @@ nix-build --version test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } +with_tests=false + + +while getopts 't' flag; do + case "${flag}" in + t) with_tests=true ;; + *) break + ;; + esac +done + cp assembly/nix/linux-arm64* . cp assembly/nix/microhttpd.nix . cp assembly/nix/openssl.nix . export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz -nix-build linux-arm64-static.nix +if [ "$with_tests" = true ]; then + nix-build linux-arm64-static.nix --arg testing true +else + nix-build linux-arm64-static.nix +fi mkdir artifacts cp ./result/bin/* artifacts/ chmod +x artifacts/* @@ -17,4 +32,4 @@ nix-build linux-arm64-tonlib.nix cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so cp ./result/lib/libemulator.so artifacts/ cp -r crypto/fift/lib artifacts/ -cp -r crypto/smartcont artifacts/ \ No newline at end of file +cp -r crypto/smartcont artifacts/ diff --git a/assembly/nix/build-linux-x86-64-nix.sh b/assembly/nix/build-linux-x86-64-nix.sh index eca6fe58..60d31c94 100644 --- a/assembly/nix/build-linux-x86-64-nix.sh +++ b/assembly/nix/build-linux-x86-64-nix.sh @@ -3,12 +3,28 @@ nix-build --version test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } +with_tests=false + + +while getopts 't' flag; do + case "${flag}" in + t) with_tests=true ;; + *) break + ;; + esac +done + cp assembly/nix/linux-x86-64* . cp assembly/nix/microhttpd.nix . cp assembly/nix/openssl.nix . 
export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz -nix-build linux-x86-64-static.nix +if [ "$with_tests" = true ]; then + nix-build linux-x86-64-static.nix --arg testing true +else + nix-build linux-x86-64-static.nix +fi + mkdir artifacts cp ./result/bin/* artifacts/ chmod +x artifacts/* @@ -17,4 +33,4 @@ nix-build linux-x86-64-tonlib.nix cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so cp ./result/lib/libemulator.so artifacts/ cp -r crypto/fift/lib artifacts/ -cp -r crypto/smartcont artifacts/ \ No newline at end of file +cp -r crypto/smartcont artifacts/ diff --git a/assembly/nix/build-macos-nix.sh b/assembly/nix/build-macos-nix.sh index fdf674a6..c3664bf0 100644 --- a/assembly/nix/build-macos-nix.sh +++ b/assembly/nix/build-macos-nix.sh @@ -3,9 +3,25 @@ nix-build --version test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } +with_tests=false + + +while getopts 't' flag; do + case "${flag}" in + t) with_tests=true ;; + *) break + ;; + esac +done + cp assembly/nix/macos-* . export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz -nix-build macos-static.nix + +if [ "$with_tests" = true ]; then + nix-build macos-static.nix --arg testing true +else + nix-build macos-static.nix +fi mkdir artifacts cp ./result-bin/bin/* artifacts/ chmod +x artifacts/* @@ -14,4 +30,4 @@ nix-build macos-tonlib.nix cp ./result/lib/libtonlibjson.dylib artifacts/ cp ./result/lib/libemulator.dylib artifacts/ cp -r crypto/fift/lib artifacts/ -cp -r crypto/smartcont artifacts/ \ No newline at end of file +cp -r crypto/smartcont artifacts/ diff --git a/assembly/nix/linux-arm64-static.nix b/assembly/nix/linux-arm64-static.nix index 616dfba5..5e834269 100644 --- a/assembly/nix/linux-arm64-static.nix +++ b/assembly/nix/linux-arm64-static.nix @@ -3,6 +3,7 @@ { pkgs ? import { system = builtins.currentSystem; } , lib ? pkgs.lib , stdenv ? pkgs.stdenv +, testing ? false }: let microhttpdmy = (import ./microhttpd.nix) {}; @@ -25,7 +26,7 @@ stdenv.mkDerivation { ]; makeStatic = true; - doCheck = true; + doCheck = testing; cmakeFlags = [ "-DTON_USE_ABSEIL=OFF" diff --git a/assembly/nix/linux-x86-64-static.nix b/assembly/nix/linux-x86-64-static.nix index 616dfba5..5e834269 100644 --- a/assembly/nix/linux-x86-64-static.nix +++ b/assembly/nix/linux-x86-64-static.nix @@ -3,6 +3,7 @@ { pkgs ? import { system = builtins.currentSystem; } , lib ? pkgs.lib , stdenv ? pkgs.stdenv +, testing ? false }: let microhttpdmy = (import ./microhttpd.nix) {}; @@ -25,7 +26,7 @@ stdenv.mkDerivation { ]; makeStatic = true; - doCheck = true; + doCheck = testing; cmakeFlags = [ "-DTON_USE_ABSEIL=OFF" diff --git a/assembly/nix/macos-static.nix b/assembly/nix/macos-static.nix index e65ec1a8..a3d4667f 100644 --- a/assembly/nix/macos-static.nix +++ b/assembly/nix/macos-static.nix @@ -3,6 +3,7 @@ { pkgs ? import { system = builtins.currentSystem; } , lib ? pkgs.lib , stdenv ? pkgs.stdenv +, testing ? 
false }: pkgs.llvmPackages_14.stdenv.mkDerivation { @@ -29,7 +30,7 @@ pkgs.llvmPackages_14.stdenv.mkDerivation { dontAddStaticConfigureFlags = true; makeStatic = true; - doCheck = true; + doCheck = testing; configureFlags = []; @@ -62,4 +63,4 @@ pkgs.llvmPackages_14.stdenv.mkDerivation { done ''; outputs = [ "bin" "out" ]; -} \ No newline at end of file +} From 49d62dc3bb075605b9174d1fb68aadda0a7412b2 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Wed, 24 Jan 2024 13:05:22 +0300 Subject: [PATCH 67/71] Activate new changes in TVM by version>=5, reduce gas cost for loading libraries (#875) Co-authored-by: SpyCheese --- crypto/vm/cellops.cpp | 47 +++++++++++++++++++---------------- crypto/vm/cells/CellSlice.cpp | 14 ++++++----- crypto/vm/vm.h | 2 +- crypto/vm/vmstate.h | 4 +++ doc/GlobalVersions.md | 11 ++++++-- 5 files changed, 47 insertions(+), 31 deletions(-) diff --git a/crypto/vm/cellops.cpp b/crypto/vm/cellops.cpp index 9b702fcd..af84a67e 100644 --- a/crypto/vm/cellops.cpp +++ b/crypto/vm/cellops.cpp @@ -892,34 +892,37 @@ int exec_load_special_cell(VmState* st, bool quiet) { Stack& stack = st->get_stack(); VM_LOG(st) << "execute XLOAD" << (quiet ? "Q" : ""); auto cell = stack.pop_cell(); - auto r_loaded_cell = cell->load_cell(); - if (r_loaded_cell.is_error()) { - if (quiet) { - stack.push_bool(false); - return 0; - } else { - throw VmError{Excno::cell_und, "failed to load cell"}; - } - } - auto loaded_cell = r_loaded_cell.move_as_ok(); - if (loaded_cell.data_cell->is_special()) { - if (loaded_cell.data_cell->special_type() != CellTraits::SpecialType::Library) { + if (st->get_global_version() >= 5) { + st->register_cell_load(cell->get_hash()); + auto r_loaded_cell = cell->load_cell(); + if (r_loaded_cell.is_error()) { if (quiet) { stack.push_bool(false); return 0; } else { - throw VmError{Excno::cell_und, "unexpected special cell"}; + throw VmError{Excno::cell_und, "failed to load cell"}; } } - CellSlice cs(std::move(loaded_cell)); - DCHECK(cs.size() == Cell::hash_bits + 8); - cell = st->load_library(cs.data_bits() + 8); - if (cell.is_null()) { - if (quiet) { - stack.push_bool(false); - return 0; - } else { - throw VmError{Excno::cell_und, "failed to load library cell"}; + auto loaded_cell = r_loaded_cell.move_as_ok(); + if (loaded_cell.data_cell->is_special()) { + if (loaded_cell.data_cell->special_type() != CellTraits::SpecialType::Library) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "unexpected special cell"}; + } + } + CellSlice cs(std::move(loaded_cell)); + DCHECK(cs.size() == Cell::hash_bits + 8); + cell = st->load_library(cs.data_bits() + 8); + if (cell.is_null()) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "failed to load library cell"}; + } } } } diff --git a/crypto/vm/cells/CellSlice.cpp b/crypto/vm/cells/CellSlice.cpp index cbfc9e50..ee5f6941 100644 --- a/crypto/vm/cells/CellSlice.cpp +++ b/crypto/vm/cells/CellSlice.cpp @@ -1056,10 +1056,10 @@ std::ostream& operator<<(std::ostream& os, Ref cs_ref) { // If can_be_special is not null, then it is allowed to load special cell // Flag whether loaded cell is actually special will be stored into can_be_special VirtualCell::LoadedCell load_cell_slice_impl(Ref cell, bool* can_be_special) { + auto* vm_state_interface = VmStateInterface::get(); bool library_loaded = false; while (true) { - auto* vm_state_interface = VmStateInterface::get(); - if (vm_state_interface) { + if (vm_state_interface && !library_loaded) { 
vm_state_interface->register_cell_load(cell->get_hash()); } auto r_loaded_cell = cell->load_cell(); @@ -1077,11 +1077,13 @@ VirtualCell::LoadedCell load_cell_slice_impl(Ref cell, bool* can_be_specia *can_be_special = loaded_cell.data_cell->is_special(); } else if (loaded_cell.data_cell->is_special()) { if (loaded_cell.data_cell->special_type() == DataCell::SpecialType::Library) { - if (library_loaded) { - throw VmError{Excno::cell_und, "failed to load library cell: recursive library cells are not allowed"}; - } - library_loaded = true; if (vm_state_interface) { + if (vm_state_interface->get_global_version() >= 5) { + if (library_loaded) { + throw VmError{Excno::cell_und, "failed to load library cell: recursive library cells are not allowed"}; + } + library_loaded = true; + } CellSlice cs(std::move(loaded_cell)); DCHECK(cs.size() == Cell::hash_bits + 8); auto library_cell = vm_state_interface->load_library(cs.data_bits() + 8); diff --git a/crypto/vm/vm.h b/crypto/vm/vm.h index fd18f92d..e5cca026 100644 --- a/crypto/vm/vm.h +++ b/crypto/vm/vm.h @@ -340,7 +340,7 @@ class VmState final : public VmStateInterface { void preclear_cr(const ControlRegs& save) { cr &= save; } - int get_global_version() const { + int get_global_version() const override { return global_version; } void set_global_version(int version) { diff --git a/crypto/vm/vmstate.h b/crypto/vm/vmstate.h index a81a4e78..0d6c3fdf 100644 --- a/crypto/vm/vmstate.h +++ b/crypto/vm/vmstate.h @@ -19,6 +19,7 @@ #pragma once #include "common/refcnt.hpp" #include "vm/cells.h" +#include "common/global-version.h" #include "td/utils/Context.h" @@ -38,6 +39,9 @@ class VmStateInterface : public td::Context { virtual bool register_op(int op_units = 1) { return true; }; + virtual int get_global_version() const { + return ton::SUPPORTED_VERSION; + } }; } // namespace vm diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index 7d9729f8..d2064b90 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -39,11 +39,18 @@ intermediate value before division (e.g. `(xy+w)/z`). * Unpaid storage fee is now saved to `due_payment` ## Version 5 + +### Gas limits Version 5 enables higher gas limits for special contracts. * Gas limit for all transactions on special contracts is set to `special_gas_limit` from `ConfigParam 20` (which is 35M at the moment of writing). -Previously only ticktock transactions had this limit, while ordinary transactions could use up to `gas_limit` gas (1M). +Previously only ticktock transactions had this limit, while ordinary transactions had a default limit of `gas_limit` gas (1M). * Gas usage of special contracts is not taken into account when checking block limits. This allows keeping masterchain block limits low while having high gas limits for elector. * Gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` is increased to `special_gas_limit * 2` until 2024-02-29. -See [this post](https://t.me/tonstatus/88) for details. \ No newline at end of file +See [this post](https://t.me/tonstatus/88) for details. + +### Loading libraries +* Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception. +* Loading a library consumes gas for cell load only once (for the library cell), not twice (both for the library cell and the cell in the library). +* `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed. 
\ No newline at end of file

From e459aea8e8978335047d6e2f0b51b2ade3d102ec Mon Sep 17 00:00:00 2001
From: EmelyanenkoK
Date: Thu, 25 Jan 2024 13:51:50 +0300
Subject: [PATCH 68/71] Update changelog

---
 Changelog.md        | 10 ++++++++--
 recent_changelog.md | 10 ++++++++--
 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/Changelog.md b/Changelog.md
index 9fec686d..9a98e44c 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -1,14 +1,20 @@
 ## 2024.01 Update
 
-1. Fixes in how gas in transactions on special accounts is accounted in block limit. Previously, gas was counted as usual, so to conduct elections that costs >30m gas block limit in masterchain was set to 37m gas. To lower the limit for safety reasons it is proposed to not count gas on special accounts. Besides `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. New behavior is activated through setting `gas_prices_v3` in `ConfigParam 20;`.
+1. Fixes in how gas in transactions on special accounts is accounted in block limit. Previously, gas was counted as usual, so to conduct elections that costs >30m gas block limit in masterchain was set to 37m gas. To lower the limit for safety reasons it is proposed to count gas on special accounts separately. Besides `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. New behavior is activated through setting `version >= 5` in `ConfigParam 8;`.
  * Besides update of config temporally increases gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` to `special_gas_limit`, see [details](https://t.me/tonstatus/88).
 2. Improvements in LS behavior
  * Improved detection of the state with all shards applied to decrease rate of `Block is not applied` error
  * Better error logs: `block not in db` and `block is not applied` separation
  * Fix error in proof generation for blocks after merge
+ * Fix most of `block is not applied` issues related to sending too recent block in Proofs
+ * LS now checks external messages till `accept_message` (`set_gas`).
 3. Improvements in DHT work and storage, CellDb, config.json ammendment, peer misbehavior detection, validator session stats collection, emulator.
+4. Change in CTOS and XLOAD behavior activated through setting `version >= 5` in `ConfigParam 8;`:
+ * Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception.
+ * Loading a library consumes gas for cell load only once (for the library cell), not twice (both for the library cell and the cell in the library).
+ * `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed.
 
-Besides the work of the core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection).
+Besides the work of the Core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection) and @akifoq (CTOS behavior and gas limit scheme for special accounts).
 
 ## 2023.12 Update
 
diff --git a/recent_changelog.md b/recent_changelog.md
index 637f416c..852fb764 100644
--- a/recent_changelog.md
+++ b/recent_changelog.md
@@ -1,11 +1,17 @@
 ## 2024.01 Update
 
-1. Fixes in how gas in transactions on special accounts is accounted in block limit. Previously, gas was counted as usual, so to conduct elections that costs >30m gas block limit in masterchain was set to 37m gas. To lower the limit for safety reasons it is proposed to not count gas on special accounts. Besides `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. New behavior is activated through setting `gas_prices_v3` in `ConfigParam 20;`.
+1. Fixes in how gas in transactions on special accounts is accounted in block limit. Previously, gas was counted as usual, so to conduct elections that costs >30m gas block limit in masterchain was set to 37m gas. To lower the limit for safety reasons it is proposed to count gas on special accounts separately. Besides `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. New behavior is activated through setting `version >= 5` in `ConfigParam 8;`.
  * Besides update of config temporally increases gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` to `special_gas_limit`, see [details](https://t.me/tonstatus/88).
 2. Improvements in LS behavior
  * Improved detection of the state with all shards applied to decrease rate of `Block is not applied` error
  * Better error logs: `block not in db` and `block is not applied` separation
  * Fix error in proof generation for blocks after merge
+ * Fix most of `block is not applied` issues related to sending too recent block in Proofs
+ * LS now checks external messages till `accept_message` (`set_gas`).
 3. Improvements in DHT work and storage, CellDb, config.json ammendment, peer misbehavior detection, validator session stats collection, emulator.
+4. Change in CTOS and XLOAD behavior activated through setting `version >= 5` in `ConfigParam 8;`:
+ * Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception.
+ * Loading a library consumes gas for cell load only once (for the library cell), not twice (both for the library cell and the cell in the library).
+ * `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed.
 
-Besides the work of the core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection).
+Besides the work of the Core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection) and @akifoq (CTOS behavior and gas limit scheme for special accounts).
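
The changelog entries above describe the reworked block gas accounting: with `version >= 5`, gas spent by ordinary transactions on special accounts is accumulated in its own counter and checked against the block limit separately from ordinary gas, while ticktock and mint/recover transactions are excluded from both counters. Below is a minimal illustrative sketch of that bookkeeping, not the actual collator/validator code; the names GasCaps, BlockGasTracker and add_transaction_gas are invented for this example, and the real counters live in the collator's block_limit_status_ and in ValidateQuery::total_gas_used_ / total_special_gas_used_.

#include <cstdint>
#include <stdexcept>

// Illustrative only: per-block gas caps, roughly corresponding to
// gas_limit_hard, trx_gas_limit and special_gas_limit in the config.
struct GasCaps {
  std::uint64_t block_hard_limit;
  std::uint64_t trx_gas_limit;
  std::uint64_t special_gas_limit;
};

// Two independent counters: ordinary-account gas and special-account gas.
// Each is checked against the block hard limit plus the matching
// per-transaction limit, mirroring the checks added in validate-query.cpp.
class BlockGasTracker {
 public:
  explicit BlockGasTracker(GasCaps caps) : caps_(caps) {
  }
  void add_transaction_gas(std::uint64_t gas_used, bool on_special_account) {
    (on_special_account ? special_gas_used_ : gas_used_) += gas_used;
    if (gas_used_ > caps_.block_hard_limit + caps_.trx_gas_limit) {
      throw std::runtime_error("gas block limits are exceeded (ordinary accounts)");
    }
    if (special_gas_used_ > caps_.block_hard_limit + caps_.special_gas_limit) {
      throw std::runtime_error("gas block limits are exceeded (special accounts)");
    }
  }

 private:
  GasCaps caps_;
  std::uint64_t gas_used_{0};
  std::uint64_t special_gas_used_{0};
};

Only ordinary (tr_ord) transactions that did not override their gas limit would feed such counters, which matches the `!is_special_tx && !trs->gas_limit_overridden` condition in the validator patch above.
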
From 64b04e46d708221c6f0e3d5a10bb399bc46c5575 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 26 Jan 2024 15:43:53 +0300 Subject: [PATCH 69/71] Cheap fee calculations (#878) * TVM v6 * New tuple with unpacked config parameters in c7 * New instructions for calculating fees * Change unpacked_config_tuple, fix typo --------- Co-authored-by: SpyCheese --- common/global-version.h | 2 +- crypto/block/mc-config.cpp | 84 ++++++++++--- crypto/block/mc-config.h | 13 +- crypto/block/transaction.cpp | 26 ++++ crypto/block/transaction.h | 1 + crypto/fift/lib/Asm.fif | 7 +- crypto/smc-envelope/SmartContract.cpp | 3 + crypto/vm/tonops.cpp | 167 +++++++++++++++++++++++--- doc/GlobalVersions.md | 26 +++- tonlib/test/offline.cpp | 4 +- utils/opcode-timing.cpp | 121 ++++++++++++++----- validator/impl/validate-query.cpp | 3 + 12 files changed, 384 insertions(+), 73 deletions(-) diff --git a/common/global-version.h b/common/global-version.h index 9b23d46d..0a90ab85 100644 --- a/common/global-version.h +++ b/common/global-version.h @@ -19,6 +19,6 @@ namespace ton { // See doc/GlobalVersions.md -const int SUPPORTED_VERSION = 5; +const int SUPPORTED_VERSION = 6; } diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 0cef7322..8b387155 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -621,12 +621,14 @@ td::Result> Config::get_storage_prices() const { } vm::Dictionary dict{std::move(cell), 32}; if (!dict.check_for_each([&res](Ref cs_ref, td::ConstBitPtr key, int n) -> bool { - block::gen::StoragePrices::Record data; - if (!tlb::csr_unpack(std::move(cs_ref), data) || data.utime_since != key.get_uint(n)) { + auto r_prices = do_get_one_storage_prices(*cs_ref); + if (r_prices.is_error()) { + return false; + } + res.push_back(r_prices.move_as_ok()); + if (res.back().valid_since != key.get_uint(n)) { return false; } - res.emplace_back(data.utime_since, data.bit_price_ps, data.cell_price_ps, data.mc_bit_price_ps, - data.mc_cell_price_ps); return true; })) { return td::Status::Error("invalid storage prices dictionary in configuration parameter 18"); @@ -634,16 +636,25 @@ td::Result> Config::get_storage_prices() const { return std::move(res); } -td::Result Config::do_get_gas_limits_prices(td::Ref cell, int id) { +td::Result Config::do_get_one_storage_prices(vm::CellSlice cs) { + block::gen::StoragePrices::Record data; + if (!tlb::unpack(cs, data)) { + return td::Status::Error("invalid storage prices dictionary in configuration parameter 18"); + } + return StoragePrices{data.utime_since, data.bit_price_ps, data.cell_price_ps, data.mc_bit_price_ps, + data.mc_cell_price_ps}; +} + +td::Result Config::do_get_gas_limits_prices(vm::CellSlice cs, int id) { GasLimitsPrices res; - auto cs = vm::load_cell_slice(cell); + vm::CellSlice cs0 = cs; block::gen::GasLimitsPrices::Record_gas_flat_pfx flat; if (tlb::unpack(cs, flat)) { cs = *flat.other; res.flat_gas_limit = flat.flat_gas_limit; res.flat_gas_price = flat.flat_gas_price; } else { - cs = vm::load_cell_slice(cell); + cs = cs0; } auto f = [&](const auto& r, td::uint64 spec_limit) { res.gas_limit = r.gas_limit; @@ -654,7 +665,6 @@ td::Result Config::do_get_gas_limits_prices(td::Ref c res.delete_due_limit = r.delete_due_limit; }; block::gen::GasLimitsPrices::Record_gas_prices_ext rec; - vm::CellSlice cs0 = cs; if (tlb::unpack(cs, rec)) { f(rec, rec.special_gas_limit); } else { @@ -689,7 +699,7 @@ td::Result Config::get_gas_limits_prices(bool is_masterchain) c if (cell.is_null()) { return td::Status::Error(PSLICE() << 
"configuration parameter " << id << " with gas prices is absent"); } - return do_get_gas_limits_prices(std::move(cell), id); + return do_get_gas_limits_prices(vm::load_cell_slice(cell), id); } td::Result Config::get_msg_prices(bool is_masterchain) const { @@ -698,7 +708,10 @@ td::Result Config::get_msg_prices(bool is_masterchain) const { if (cell.is_null()) { return td::Status::Error(PSLICE() << "configuration parameter " << id << " with msg prices is absent"); } - auto cs = vm::load_cell_slice(std::move(cell)); + return do_get_msg_prices(vm::load_cell_slice(cell), id); +} + +td::Result Config::do_get_msg_prices(vm::CellSlice cs, int id) { block::gen::MsgForwardPrices::Record rec; if (!tlb::unpack(cs, rec)) { return td::Status::Error(PSLICE() << "configuration parameter " << id @@ -1917,10 +1930,17 @@ std::vector Config::compute_total_validator_set(int next) c } td::Result Config::get_size_limits_config() const { - SizeLimitsConfig limits; td::Ref param = get_config_param(43); if (param.is_null()) { - return limits; + return do_get_size_limits_config({}); + } + return do_get_size_limits_config(vm::load_cell_slice_ref(param)); +} + +td::Result Config::do_get_size_limits_config(td::Ref cs) { + SizeLimitsConfig limits; + if (cs.is_null()) { + return limits; // default values } auto unpack_v1 = [&](auto& rec) { limits.max_msg_bits = rec.max_msg_bits; @@ -1939,9 +1959,9 @@ td::Result Config::get_size_limits_config() const { }; gen::SizeLimitsConfig::Record_size_limits_config rec_v1; gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2; - if (tlb::unpack_cell(param, rec_v1)) { + if (tlb::csr_unpack(cs, rec_v1)) { unpack_v1(rec_v1); - } else if (tlb::unpack_cell(param, rec_v2)) { + } else if (tlb::csr_unpack(cs, rec_v2)) { unpack_v2(rec_v2); } else { return td::Status::Error("configuration parameter 43 is invalid"); @@ -1976,6 +1996,42 @@ BurningConfig Config::get_burning_config() const { return c; } +td::Ref Config::get_unpacked_config_tuple(ton::UnixTime now) const { + auto get_param = [&](td::int32 idx) -> vm::StackEntry { + auto cell = get_config_param(idx); + if (cell.is_null()) { + return {}; + } + return vm::load_cell_slice_ref(cell); + }; + auto get_current_storage_prices = [&]() -> vm::StackEntry { + auto cell = get_config_param(18); + if (cell.is_null()) { + return {}; + } + vm::StackEntry res; + vm::Dictionary dict{std::move(cell), 32}; + dict.check_for_each([&](Ref cs_ref, td::ConstBitPtr key, int n) -> bool { + auto utime_since = key.get_uint(n); + if (now >= utime_since) { + res = std::move(cs_ref); + return true; + } + return false; + }); + return res; + }; + std::vector tuple; + tuple.push_back(get_current_storage_prices()); // storage_prices + tuple.push_back(get_param(19)); // global_id + tuple.push_back(get_param(20)); // config_mc_gas_prices + tuple.push_back(get_param(21)); // config_gas_prices + tuple.push_back(get_param(24)); // config_mc_fwd_prices + tuple.push_back(get_param(25)); // config_fwd_prices + tuple.push_back(get_param(43)); // size_limits_config + return td::make_cnt_ref>(std::move(tuple)); +} + td::Result> Config::unpack_validator_set_start_stop(Ref vset_root) { if (vset_root.is_null()) { return td::Status::Error("validator set absent"); diff --git a/crypto/block/mc-config.h b/crypto/block/mc-config.h index caab93f3..624e3e03 100644 --- a/crypto/block/mc-config.h +++ b/crypto/block/mc-config.h @@ -350,7 +350,11 @@ struct GasLimitsPrices { td::uint64 freeze_due_limit{0}; td::uint64 delete_due_limit{0}; - td::RefInt256 compute_gas_price(td::uint64 
gas_used) const; + td::RefInt256 compute_gas_price(td::uint64 gas_used) const { + return gas_used <= flat_gas_limit + ? td::make_refint(flat_gas_price) + : td::rshift(td::make_refint(gas_price) * (gas_used - flat_gas_limit), 16, 1) + flat_gas_price; + } }; // msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms @@ -365,6 +369,7 @@ struct MsgPrices { td::uint32 first_frac; td::uint32 next_frac; td::uint64 compute_fwd_fees(td::uint64 cells, td::uint64 bits) const; + td::RefInt256 compute_fwd_fees256(td::uint64 cells, td::uint64 bits) const; std::pair compute_fwd_ihr_fees(td::uint64 cells, td::uint64 bits, bool ihr_disabled = false) const; MsgPrices() = default; @@ -604,9 +609,11 @@ class Config { bool is_special_smartcontract(const ton::StdSmcAddress& addr) const; static td::Result> unpack_validator_set(Ref valset_root); td::Result> get_storage_prices() const; + static td::Result do_get_one_storage_prices(vm::CellSlice cs); td::Result get_gas_limits_prices(bool is_masterchain = false) const; - static td::Result do_get_gas_limits_prices(td::Ref cell, int id); + static td::Result do_get_gas_limits_prices(vm::CellSlice cs, int id); td::Result get_msg_prices(bool is_masterchain = false) const; + static td::Result do_get_msg_prices(vm::CellSlice cs, int id); static CatchainValidatorsConfig unpack_catchain_validators_config(Ref cell); CatchainValidatorsConfig get_catchain_validators_config() const; td::Status visit_validator_params() const; @@ -633,8 +640,10 @@ class Config { ton::CatchainSeqno cc_seqno) const; std::vector compute_total_validator_set(int next) const; td::Result get_size_limits_config() const; + static td::Result do_get_size_limits_config(td::Ref cs); std::unique_ptr get_suspended_addresses(ton::UnixTime now) const; BurningConfig get_burning_config() const; + td::Ref get_unpacked_config_tuple(ton::UnixTime now) const; static std::vector do_compute_validator_set(const block::CatchainValidatorsConfig& ccv_conf, ton::ShardIdFull shard, const block::ValidatorSet& vset, ton::UnixTime time, diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index e43b9305..e08407a2 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1337,6 +1337,10 @@ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { // may only return tuple or raise Error (See crypto/block/mc-config.cpp#2223) tuple.push_back(cfg.prev_blocks_info.not_null() ? vm::StackEntry(cfg.prev_blocks_info) : vm::StackEntry()); } + if (cfg.global_version >= 6) { + tuple.push_back(cfg.unpacked_config_tuple.not_null() ? vm::StackEntry(cfg.unpacked_config_tuple) + : vm::StackEntry()); // unpacked_config_tuple:[...] + } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); LOG(DEBUG) << "SmartContractInfo initialized with " << vm::StackEntry(tuple_ref).to_string(); return vm::make_tuple_ref(std::move(tuple_ref)); @@ -1920,6 +1924,25 @@ td::uint64 MsgPrices::compute_fwd_fees(td::uint64 cells, td::uint64 bits) const .lo(); } +/** + * Computes the forward fees for a message based on the number of cells and bits. + * Return the result as td::RefInt256 + * + * msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms + * ihr_fwd_fees = ceil((msg_fwd_fees * ihr_price_factor)/2^16) nanograms + * bits in the root cell of a message are not included in msg.bits (lump_price pays for them) + * + * @param cells The number of cells in the message. + * @param bits The number of bits in the message. 
+ * + * @returns The computed forward fees for the message as td::RefInt256j. + */ +td::RefInt256 MsgPrices::compute_fwd_fees256(td::uint64 cells, td::uint64 bits) const { + return td::rshift( + td::make_refint(lump_price) + td::make_refint(bit_price) * bits + td::make_refint(cell_price) * cells, 16, + 1); // divide by 2^16 with ceil rounding +} + /** * Computes the forward fees and IHR fees for a message with the given number of cells and bits. * @@ -3539,6 +3562,9 @@ td::Status FetchConfigParams::fetch_config_params( if (compute_phase_cfg->global_version >= 4) { compute_phase_cfg->prev_blocks_info = std::move(prev_blocks_info); } + if (compute_phase_cfg->global_version >= 6) { + compute_phase_cfg->unpacked_config_tuple = config.get_unpacked_config_tuple(now); + } compute_phase_cfg->suspended_addresses = config.get_suspended_addresses(now); compute_phase_cfg->size_limits = size_limits; } diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 1ed2dfd3..57defc8c 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -117,6 +117,7 @@ struct ComputePhaseConfig { td::uint16 max_vm_data_depth = 512; int global_version = 0; Ref prev_blocks_info; + Ref unpacked_config_tuple; std::unique_ptr suspended_addresses; SizeLimitsConfig size_limits; int vm_log_verbosity = 0; diff --git a/crypto/fift/lib/Asm.fif b/crypto/fift/lib/Asm.fif index 0a4c7074..630f325a 100644 --- a/crypto/fift/lib/Asm.fif +++ b/crypto/fift/lib/Asm.fif @@ -2,7 +2,7 @@ library TVM_Asm // simple TVM Assembler namespace Asm Asm definitions -"0.4.4" constant asm-fif-version +"0.4.5" constant asm-fif-version variable @atend variable @was-split @@ -1295,12 +1295,17 @@ x{F82A} @Defop MYCODE x{F82B} @Defop INCOMINGVALUE x{F82C} @Defop STORAGEFEES x{F82D} @Defop PREVBLOCKSINFOTUPLE +x{F82E} @Defop UNPACKEDCONFIGTUPLE x{F830} @Defop CONFIGDICT x{F832} @Defop CONFIGPARAM x{F833} @Defop CONFIGOPTPARAM x{F83400} @Defop PREVMCBLOCKS x{F83401} @Defop PREVKEYBLOCK x{F835} @Defop GLOBALID +x{F836} @Defop GETEXECUTIONPRICE +x{F837} @Defop GETSTORAGEPRICE +x{F838} @Defop GETFORWARDPRICE +x{F839} @Defop GETPRECOMPILEDGAS x{F840} @Defop GETGLOBVAR { dup 1 31 @rangechk prepare_vm_c7(SmartContract::Args args, td::Ref cod // prev_key_block:BlockId ] : PrevBlocksInfo tuple.push_back(args.prev_blocks_info ? 
args.prev_blocks_info.value() : vm::StackEntry{}); // prev_block_info } + if (args.config && args.config.value()->get_global_version() >= 6) { + tuple.push_back(args.config.value()->get_unpacked_config_tuple(now)); // unpacked_config_tuple + } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); //LOG(DEBUG) << "SmartContractInfo initialized with " << vm::StackEntry(tuple).to_string(); return vm::make_tuple_ref(std::move(tuple_ref)); diff --git a/crypto/vm/tonops.cpp b/crypto/vm/tonops.cpp index dce61797..753bbc0a 100644 --- a/crypto/vm/tonops.cpp +++ b/crypto/vm/tonops.cpp @@ -35,6 +35,7 @@ #include "openssl/digest.hpp" #include #include "bls.h" +#include "mc-config.h" namespace vm { @@ -122,6 +123,20 @@ static const StackEntry& get_param(VmState* st, unsigned idx) { return tuple_index(t1, idx); } +// ConfigParams: 18 (only one entry), 19, 20, 21, 24, 25, 43 +static td::Ref get_unpacked_config_param(VmState* st, unsigned idx) { + auto tuple = st->get_c7(); + auto t1 = tuple_index(tuple, 0).as_tuple_range(255); + if (t1.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a tuple"}; + } + auto t2 = tuple_index(t1, 14).as_tuple_range(255); + if (t2.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a tuple"}; + } + return tuple_index(t2, idx).as_slice(); +} + int exec_get_param(VmState* st, unsigned idx, const char* name) { if (name) { VM_LOG(st) << "execute " << name; @@ -232,20 +247,107 @@ int exec_get_prev_blocks_info(VmState* st, unsigned idx, const char* name) { } int exec_get_global_id(VmState* st) { - Ref config = get_param(st, 9).as_cell(); - if (config.is_null()) { - throw VmError{Excno::type_chk, "intermediate value is not a cell"}; + VM_LOG(st) << "execute GLOBALID"; + if (st->get_global_version() >= 6) { + Ref cs = get_unpacked_config_param(st, 1); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; + } + if (cs->size() < 32) { + throw VmError{Excno::cell_und, "invalid global-id config"}; + } + st->get_stack().push_smallint(cs->prefetch_long(32)); + } else { + Ref config = get_param(st, 19).as_cell(); + if (config.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a cell"}; + } + Dictionary config_dict{std::move(config), 32}; + Ref cell = config_dict.lookup_ref(td::BitArray<32>{19}); + if (cell.is_null()) { + throw VmError{Excno::unknown, "invalid global-id config"}; + } + CellSlice cs = load_cell_slice(cell); + if (cs.size() < 32) { + throw VmError{Excno::unknown, "invalid global-id config"}; + } + st->get_stack().push_smallint(cs.fetch_long(32)); } - Dictionary config_dict{std::move(config), 32}; - Ref cell = config_dict.lookup_ref(td::BitArray<32>{19}); - if (cell.is_null()) { - throw VmError{Excno::unknown, "invalid global-id config"}; + return 0; +} + +int exec_get_execution_price(VmState* st) { + VM_LOG(st) << "execute GETEXECUTIONPRICE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 gas = stack.pop_long_range(std::numeric_limits::max(), 0); + Ref cs = get_unpacked_config_param(st, is_masterchain ? 2 : 3); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; } - CellSlice cs = load_cell_slice(cell); - if (cs.size() < 32) { - throw VmError{Excno::unknown, "invalid global-id config"}; + auto r_prices = block::Config::do_get_gas_limits_prices(*cs, is_masterchain ? 
20 : 21); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; } - st->get_stack().push_smallint(cs.fetch_long(32)); + block::GasLimitsPrices prices = r_prices.move_as_ok(); + stack.push_int(prices.compute_gas_price(gas)); + return 0; +} + +int exec_get_storage_price(VmState* st) { + VM_LOG(st) << "execute GETSTORAGEPRICE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::int64 delta = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); + Ref cs = get_unpacked_config_param(st, 0); + if (cs.is_null()) { + // null means tat no StoragePrices is active, so the price is 0 + stack.push_smallint(0); + return 0; + } + auto r_prices = block::Config::do_get_one_storage_prices(*cs); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + block::StoragePrices prices = r_prices.move_as_ok(); + td::RefInt256 total; + if (is_masterchain) { + total = td::make_refint(cells) * prices.mc_cell_price; + total += td::make_refint(bits) * prices.mc_bit_price; + } else { + total = td::make_refint(cells) * prices.cell_price; + total += td::make_refint(bits) * prices.bit_price; + } + total *= delta; + stack.push_int(td::rshift(total, 16, 1)); + return 0; +} + +int exec_get_forward_price(VmState* st) { + VM_LOG(st) << "execute GETFORWARDPRICE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); + Ref cs = get_unpacked_config_param(st, is_masterchain ? 4 : 5); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; + } + auto r_prices = block::Config::do_get_msg_prices(*cs, is_masterchain ? 
24 : 25); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + block::MsgPrices prices = r_prices.move_as_ok(); + stack.push_int(prices.compute_fwd_fees256(cells, bits)); + return 0; +} + +int exec_get_precompiled_gas(VmState* st) { + VM_LOG(st) << "execute GETPRECOMPILEDGAS"; + Stack& stack = st->get_stack(); + stack.push_null(); return 0; } @@ -263,13 +365,18 @@ void register_ton_config_ops(OpcodeTable& cp0) { .insert(OpcodeInstr::mksimple(0xf82b, 16, "INCOMINGVALUE", std::bind(exec_get_param, _1, 11, "INCOMINGVALUE"))) .insert(OpcodeInstr::mksimple(0xf82c, 16, "STORAGEFEES", std::bind(exec_get_param, _1, 12, "STORAGEFEES"))) .insert(OpcodeInstr::mksimple(0xf82d, 16, "PREVBLOCKSINFOTUPLE", std::bind(exec_get_param, _1, 13, "PREVBLOCKSINFOTUPLE"))) - .insert(OpcodeInstr::mkfixedrange(0xf82e, 0xf830, 16, 4, instr::dump_1c("GETPARAM "), exec_get_var_param)) + .insert(OpcodeInstr::mksimple(0xf82e, 16, "UNPACKEDCONFIGTUPLE", std::bind(exec_get_param, _1, 14, "UNPACKEDCONFIGTUPLE"))) + .insert(OpcodeInstr::mkfixedrange(0xf82f, 0xf830, 16, 4, instr::dump_1c("GETPARAM "), exec_get_var_param)) .insert(OpcodeInstr::mksimple(0xf830, 16, "CONFIGDICT", exec_get_config_dict)) .insert(OpcodeInstr::mksimple(0xf832, 16, "CONFIGPARAM", std::bind(exec_get_config_param, _1, false))) .insert(OpcodeInstr::mksimple(0xf833, 16, "CONFIGOPTPARAM", std::bind(exec_get_config_param, _1, true))) .insert(OpcodeInstr::mksimple(0xf83400, 24, "PREVMCBLOCKS", std::bind(exec_get_prev_blocks_info, _1, 0, "PREVMCBLOCKS"))->require_version(4)) .insert(OpcodeInstr::mksimple(0xf83401, 24, "PREVKEYBLOCK", std::bind(exec_get_prev_blocks_info, _1, 1, "PREVKEYBLOCK"))->require_version(4)) .insert(OpcodeInstr::mksimple(0xf835, 16, "GLOBALID", exec_get_global_id)->require_version(4)) + .insert(OpcodeInstr::mksimple(0xf836, 16, "GETEXECUTIONPRICE", exec_get_execution_price)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf837, 16, "GETSTORAGEPRICE", exec_get_storage_price)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf838, 16, "GETFORWARDPRICE", exec_get_forward_price)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf839, 16, "GETPRECOMPILEDGAS", exec_get_precompiled_gas)->require_version(6)) .insert(OpcodeInstr::mksimple(0xf840, 16, "GETGLOBVAR", exec_get_global_var)) .insert(OpcodeInstr::mkfixedrange(0xf841, 0xf860, 16, 5, instr::dump_1c_and(31, "GETGLOB "), exec_get_global)) .insert(OpcodeInstr::mksimple(0xf860, 16, "SETGLOBVAR", exec_set_global_var)) @@ -1592,17 +1699,39 @@ int exec_send_message(VmState* st) { } bool is_masterchain = parse_addr_workchain(*my_addr) == -1 || (!ext_msg && parse_addr_workchain(*dest) == -1); - Ref config_dict = get_param(st, 9).as_cell(); - Dictionary config{config_dict, 32}; - Ref prices_cell = config.lookup_ref(td::BitArray<32>{is_masterchain ? 24 : 25}); - block::gen::MsgForwardPrices::Record prices; - if (prices_cell.is_null() || !tlb::unpack_cell(std::move(prices_cell), prices)) { + td::Ref prices_cs; + if (st->get_global_version() >= 6) { + prices_cs = get_unpacked_config_param(st, is_masterchain ? 4 : 5); + } else { + Ref config_dict = get_param(st, 9).as_cell(); + Dictionary config{config_dict, 32}; + Ref prices_cell = config.lookup_ref(td::BitArray<32>{is_masterchain ? 
24 : 25}); + if (prices_cell.not_null()) { + prices_cs = load_cell_slice_ref(prices_cell); + } + } + if (prices_cs.is_null()) { throw VmError{Excno::unknown, "invalid prices config"}; } + auto r_prices = block::Config::do_get_msg_prices(*prices_cs, is_masterchain ? 24 : 25); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + block::MsgPrices prices = r_prices.move_as_ok(); // msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms // bits in the root cell of a message are not included in msg.bits (lump_price pays for them) - vm::VmStorageStat stat(1 << 13); + td::uint64 max_cells; + if (st->get_global_version() >= 6) { + auto r_size_limits_config = block::Config::do_get_size_limits_config(get_unpacked_config_param(st, 6)); + if (r_size_limits_config.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_size_limits_config.error().message()}; + } + max_cells = r_size_limits_config.ok().max_msg_cells; + } else { + max_cells = 1 << 13; + } + vm::VmStorageStat stat(max_cells); CellSlice cs = load_cell_slice(msg_cell); cs.skip_first(cs.size()); stat.add_storage(cs); @@ -1650,7 +1779,7 @@ int exec_send_message(VmState* st) { if (ihr_disabled) { ihr_fee_short = 0; } else { - ihr_fee_short = td::uint128(fwd_fee_short).mult(prices.ihr_price_factor).shr(16).lo(); + ihr_fee_short = td::uint128(fwd_fee_short).mult(prices.ihr_factor).shr(16).lo(); } fwd_fee = td::RefInt256{true, fwd_fee_short}; ihr_fee = td::RefInt256{true, ihr_fee_short}; diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index d2064b90..1264dc2b 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -53,4 +53,28 @@ See [this post](https://t.me/tonstatus/88) for details. ### Loading libraries * Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception. * Loading a library consumes gas for cell load only once (for the library cell), not twice (both for the library cell and the cell in the library). -* `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed. \ No newline at end of file +* `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed. + +## Version 6 + +### c7 tuple +**c7** tuple extended from 14 to 15 elements. The new element is a tuple that contains some config parameters as cell slices. +If the parameter is absent from the config, the value is null. +* **0**: `StoragePrices` from `ConfigParam 18`. Not the whole dict, but only the one StoragePrices entry (one which corresponds to the current time). +* **1**: `ConfigParam 19` (global id). +* **2**: `ConfigParam 20` (mc gas prices). +* **3**: `ConfigParam 21` (gas prices). +* **4**: `ConfigParam 24` (mc fwd fees). +* **5**: `ConfigParam 25` (fwd fees). +* **6**: `ConfigParam 43` (size limits). + +### New TVM instructions +* `GETEXECUTIONPRICE` (`gas_used is_mc - price`) - calculates gas fee. +* `GETSTORAGEPRICE` (`cells bits seconds is_mc - price`) - calculates storage fees (only current StoragePrices entry is used). +* `GETFORWARDPRICE` (`cells bits is_mc - price`) - calculates forward fee. +* `GETPRECOMPILEDGAS` (`- null`) - reserved, currently returns `null`. +`gas_used`, `cells`, `bits`, `time_delta` are integers in range `0..2^63-1`. 
+ +### Other changes +* `GLOBALID` gets `ConfigParam 19` from the tuple, not from the config dict. This decreases gas usage. +* `SENDMSG` gets `ConfigParam 24/25` (message prices) from the tuple, not from the config dict, and also uses `ConfigParam 43` to get max_msg_cells. \ No newline at end of file diff --git a/tonlib/test/offline.cpp b/tonlib/test/offline.cpp index 0fedc865..7b4342dd 100644 --- a/tonlib/test/offline.cpp +++ b/tonlib/test/offline.cpp @@ -333,8 +333,8 @@ TEST(Tonlib, ConfigParseBug) { unsigned char buff[128]; int bits = (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), literal.begin(), literal.end()); CHECK(bits >= 0); - auto slice = vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize(); - block::Config::do_get_gas_limits_prices(std::move(slice), 21).ensure(); + auto cell = vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize(); + block::Config::do_get_gas_limits_prices(vm::load_cell_slice(cell), 21).ensure(); } TEST(Tonlib, EncryptionApi) { diff --git a/utils/opcode-timing.cpp b/utils/opcode-timing.cpp index 4f5c8ab4..d68928d0 100644 --- a/utils/opcode-timing.cpp +++ b/utils/opcode-timing.cpp @@ -11,6 +11,55 @@ #include "td/utils/ScopeGuard.h" #include "td/utils/StringBuilder.h" #include "td/utils/Timer.h" +#include "block.h" +#include "td/utils/filesystem.h" +#include "mc-config.h" + +td::Ref c7; + +void prepare_c7() { + auto now = (td::uint32)td::Clocks::system(); + td::Ref config_root; + auto config_data = td::read_file("config.boc"); + if (config_data.is_ok()) { + LOG(WARNING) << "Reading config from config.boc"; + auto r_cell = vm::std_boc_deserialize(config_data.move_as_ok()); + r_cell.ensure(); + config_root = r_cell.move_as_ok(); + } + + vm::CellBuilder addr; + addr.store_long(4, 3); + addr.store_long(0, 8); + addr.store_ones(256); + std::vector tuple = { + td::make_refint(0x076ef1ea), // [ magic:0x076ef1ea + td::zero_refint(), // actions:Integer + td::zero_refint(), // msgs_sent:Integer + td::make_refint(now), // unixtime:Integer + td::make_refint(0), // block_lt:Integer + td::make_refint(0), // trans_lt:Integer + td::make_refint(123), // rand_seed:Integer + block::CurrencyCollection(td::make_refint(10000LL * 1000000000)) + .as_vm_tuple(), // balance_remaining:[Integer (Maybe Cell)] + addr.as_cellslice_ref(), // myself:MsgAddressInt + vm::StackEntry::maybe(config_root) // global_config:(Maybe Cell) ] = SmartContractInfo; + }; + tuple.push_back({}); // code:Cell + tuple.push_back(block::CurrencyCollection(td::make_refint(2000LL * 1000000000)) + .as_vm_tuple()); // in_msg_value:[Integer (Maybe Cell)] + tuple.push_back(td::make_refint(0)); // storage_fees:Integer + tuple.push_back(vm::StackEntry()); // prev_blocks_info + if (config_root.not_null()) { + block::Config config{config_root}; + config.unpack().ensure(); + tuple.push_back(config.get_unpacked_config_tuple(now)); // unpacked_config_tuple + } else { + tuple.push_back(vm::StackEntry()); + } + auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); + c7 = vm::make_tuple_ref(std::move(tuple_ref)); +} td::Ref to_cell(td::Slice s) { if (s.size() >= 4 && s.substr(0, 4) == "boc:") { @@ -34,9 +83,11 @@ struct runInfo { long long gasUsage; int vmReturnCode; - runInfo() : runtime(0.0), gasUsage(0), vmReturnCode(0) {} - runInfo(long double runtime, long long gasUsage, int vmReturnCode) : - runtime(runtime), gasUsage(gasUsage), vmReturnCode(vmReturnCode) {} + runInfo() : runtime(0.0), gasUsage(0), vmReturnCode(0) { + } + runInfo(long double runtime, long long gasUsage, int 
vmReturnCode) + : runtime(runtime), gasUsage(gasUsage), vmReturnCode(vmReturnCode) { + } runInfo operator+(const runInfo& addend) const { return {runtime + addend.runtime, gasUsage + addend.gasUsage, vmReturnCode ? vmReturnCode : addend.vmReturnCode}; @@ -45,7 +96,7 @@ struct runInfo { runInfo& operator+=(const runInfo& addend) { runtime += addend.runtime; gasUsage += addend.gasUsage; - if(!vmReturnCode && addend.vmReturnCode) { + if (!vmReturnCode && addend.vmReturnCode) { vmReturnCode = addend.vmReturnCode; } return *this; @@ -68,8 +119,8 @@ vm::Stack prepare_stack(td::Slice command) { vm::Stack stack; try { vm::GasLimits gas_limit; - int ret = vm::run_vm_code(vm::load_cell_slice_ref(cell), stack, 0 /*flags*/, nullptr /*data*/, - vm::VmLog{}, nullptr, &gas_limit, {}, {}, nullptr, 4); + int ret = vm::run_vm_code(vm::load_cell_slice_ref(cell), stack, 0 /*flags*/, nullptr /*data*/, vm::VmLog{}, nullptr, + &gas_limit, {}, c7, nullptr, ton::SUPPORTED_VERSION); CHECK(ret == 0); } catch (...) { LOG(FATAL) << "catch unhandled exception"; @@ -83,8 +134,8 @@ runInfo time_run_vm(td::Slice command, td::Ref stack) { CHECK(stack.is_unique()); try { vm::GasLimits gas_limit; - vm::VmState vm{vm::load_cell_slice_ref(cell), std::move(stack), gas_limit, 0, {}, vm::VmLog{}, {}, {}}; - vm.set_global_version(4); + vm::VmState vm{vm::load_cell_slice_ref(cell), std::move(stack), gas_limit, 0, {}, vm::VmLog{}, {}, c7}; + vm.set_global_version(ton::SUPPORTED_VERSION); std::clock_t cStart = std::clock(); int ret = ~vm.run(); std::clock_t cEnd = std::clock(); @@ -102,7 +153,7 @@ runtimeStats averageRuntime(td::Slice command, const vm::Stack& stack) { std::vector values; values.reserve(samples); td::Timer t0; - for(size_t i = 0; i < samples; ++i) { + for (size_t i = 0; i < samples; ++i) { const auto value_empty = time_run_vm(td::Slice(""), td::Ref(true, stack)); const auto value_code = time_run_vm(command, td::Ref(true, stack)); runInfo value{value_code.runtime - value_empty.runtime, value_code.gasUsage - value_empty.gasUsage, @@ -120,18 +171,16 @@ runtimeStats averageRuntime(td::Slice command, const vm::Stack& stack) { long double runtimeDiffSum = 0.0; long double gasDiffSum = 0.0; bool errored = false; - for(const auto value : values) { + for (const auto value : values) { const auto runtime = value.runtime - runtimeMean; const auto gasUsage = static_cast(value.gasUsage) - gasMean; runtimeDiffSum += runtime * runtime; gasDiffSum += gasUsage * gasUsage; errored = errored || value.errored(); } - return { - {runtimeMean, sqrtl(runtimeDiffSum / static_cast(samples))}, - {gasMean, sqrtl(gasDiffSum / static_cast(samples))}, - errored - }; + return {{runtimeMean, sqrtl(runtimeDiffSum / static_cast(samples))}, + {gasMean, sqrtl(gasDiffSum / static_cast(samples))}, + errored}; } runtimeStats timeInstruction(const std::string& setupCode, const std::string& toMeasure) { @@ -141,28 +190,33 @@ runtimeStats timeInstruction(const std::string& setupCode, const std::string& to int main(int argc, char** argv) { SET_VERBOSITY_LEVEL(verbosity_ERROR); - if(argc != 2 && argc != 3) { - std::cerr << - "This utility compares the timing of VM execution against the gas used.\n" - "It can be used to discover opcodes or opcode sequences that consume an " - "inordinate amount of computational resources relative to their gas cost.\n" - "\n" - "The utility expects two command line arguments: \n" - "The TVM code used to set up the stack and VM state followed by the TVM code to measure.\n" - "For example, to test the DIVMODC opcode:\n" - "\t$ " << 
argv[0] << " 80FF801C A90E 2>/dev/null\n" - "\tOPCODE,runtime mean,runtime stddev,gas mean,gas stddev\n" - "\tA90E,0.0066416,0.00233496,26,0\n" - "\n" - "Usage: " << argv[0] << " [TVM_SETUP_BYTECODE] TVM_BYTECODE\n" - "\tBYTECODE is either:\n" - "\t1. hex-encoded string (e.g. A90E for DIVMODC)\n" - "\t2. boc: (e.g. boc:te6ccgEBAgEABwABAogBAAJ7)" << std::endl << std::endl; + if (argc != 2 && argc != 3) { + std::cerr << "This utility compares the timing of VM execution against the gas used.\n" + "It can be used to discover opcodes or opcode sequences that consume an " + "inordinate amount of computational resources relative to their gas cost.\n" + "\n" + "The utility expects two command line arguments: \n" + "The TVM code used to set up the stack and VM state followed by the TVM code to measure.\n" + "For example, to test the DIVMODC opcode:\n" + "\t$ " + << argv[0] + << " 80FF801C A90E 2>/dev/null\n" + "\tOPCODE,runtime mean,runtime stddev,gas mean,gas stddev\n" + "\tA90E,0.0066416,0.00233496,26,0\n" + "\n" + "Usage: " + << argv[0] + << " [TVM_SETUP_BYTECODE] TVM_BYTECODE\n" + "\tBYTECODE is either:\n" + "\t1. hex-encoded string (e.g. A90E for DIVMODC)\n" + "\t2. boc: (e.g. boc:te6ccgEBAgEABwABAogBAAJ7)" + << std::endl + << std::endl; return 1; } std::cout << "OPCODE,runtime mean,runtime stddev,gas mean,gas stddev,error" << std::endl; std::string setup, code; - if(argc == 2) { + if (argc == 2) { setup = ""; code = argv[1]; } else { @@ -170,6 +224,7 @@ int main(int argc, char** argv) { code = argv[2]; } vm::init_vm().ensure(); + prepare_c7(); const auto time = timeInstruction(setup, code); std::cout << std::fixed << std::setprecision(9) << code << "," << time.runtime.mean << "," << time.runtime.stddev << "," << time.gasUsage.mean << "," << time.gasUsage.stddev << "," << (int)time.errored << std::endl; diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 2b3ccd85..44b49226 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -960,6 +960,9 @@ bool ValidateQuery::fetch_config_params() { } compute_phase_cfg_.prev_blocks_info = prev_blocks_info.move_as_ok(); } + if (compute_phase_cfg_.global_version >= 6) { + compute_phase_cfg_.unpacked_config_tuple = config_->get_unpacked_config_tuple(now_); + } compute_phase_cfg_.suspended_addresses = config_->get_suspended_addresses(now_); compute_phase_cfg_.size_limits = size_limits; } From 51d30e2f2b5c6d4895da5a59e5af4ff91ecad02d Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Fri, 26 Jan 2024 18:24:39 +0300 Subject: [PATCH 70/71] Add TVM instructions for working with nonzero-level cells (#880) Co-authored-by: SpyCheese --- crypto/fift/lib/Asm.fif | 6 +++ crypto/test/fift.cpp | 4 ++ crypto/test/fift/levels.fif | 75 +++++++++++++++++++++++++++++++++++++ crypto/vm/cellops.cpp | 57 +++++++++++++++++++++++++++- doc/GlobalVersions.md | 14 +++++++ test/regression-tests.ans | 1 + 6 files changed, 156 insertions(+), 1 deletion(-) create mode 100644 crypto/test/fift/levels.fif diff --git a/crypto/fift/lib/Asm.fif b/crypto/fift/lib/Asm.fif index 630f325a..5ad6ce47 100644 --- a/crypto/fift/lib/Asm.fif +++ b/crypto/fift/lib/Asm.fif @@ -813,6 +813,12 @@ x{D761} @Defop LDONES x{D762} @Defop LDSAME x{D764} @Defop SDEPTH x{D765} @Defop CDEPTH +x{D766} @Defop CLEVEL +x{D767} @Defop CLEVELMASK +{ s ]] 0 runvmx abort"exitcode != 0" ."Level = " . cr + dup [[ <{ CLEVELMASK }>s ]] 0 runvmx abort"exitcode != 0" ."Level mask = 0b" b. 
cr + dup dup [[ <{ 0 CHASHI DUP ROT 0 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_0 = " X. cr + dup dup [[ <{ 1 CHASHI DUP ROT 1 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_1 = " X. cr + dup dup [[ <{ 2 CHASHI DUP ROT 2 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_2 = " X. cr + dup dup [[ <{ 3 CHASHI DUP ROT 3 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_3 = " X. cr + dup dup [[ <{ 0 CDEPTHI DUP ROT 0 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_0 = " . cr + dup dup [[ <{ 1 CDEPTHI DUP ROT 1 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_1 = " . cr + dup dup [[ <{ 2 CDEPTHI DUP ROT 2 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_2 = " . cr + dup dup [[ <{ 3 CDEPTHI DUP ROT 3 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_3 = " . cr + drop + cr +} : print-all + +// Ordinary cell of level 0 + ref, b> ref, + ref, +b> +print-all + +// Prunned branch of level 1 +spec +print-all + +// Prunned branch of level 3 +spec +print-all + +// Prunned branch of level 3, mask 0b101 +spec +print-all + +// Tree with the previous cell inside +spec ref, + b> ref, +b> +print-all diff --git a/crypto/vm/cellops.cpp b/crypto/vm/cellops.cpp index af84a67e..94407231 100644 --- a/crypto/vm/cellops.cpp +++ b/crypto/vm/cellops.cpp @@ -1391,6 +1391,55 @@ int exec_slice_depth(VmState* st) { return 0; } +int exec_cell_level(VmState* st) { + Stack& stack = st->get_stack(); + VM_LOG(st) << "execute CLEVEL"; + auto cell = stack.pop_cell(); + stack.push_smallint(cell->get_level()); + return 0; +} + +int exec_cell_level_mask(VmState* st) { + Stack& stack = st->get_stack(); + VM_LOG(st) << "execute CLEVELMASK"; + auto cell = stack.pop_cell(); + stack.push_smallint(cell->get_level_mask().get_mask()); + return 0; +} + +int exec_cell_hash_i(VmState* st, unsigned args, bool var) { + unsigned i; + Stack& stack = st->get_stack(); + if (var) { + VM_LOG(st) << "execute CHASHIX"; + i = stack.pop_smallint_range(3); + } else { + i = args & 3; + VM_LOG(st) << "execute CHASHI " << i; + } + auto cell = stack.pop_cell(); + std::array hash = cell->get_hash(i).as_array(); + td::RefInt256 res{true}; + CHECK(res.write().import_bytes(hash.data(), hash.size(), false)); + stack.push_int(std::move(res)); + return 0; +} + +int exec_cell_depth_i(VmState* st, unsigned args, bool var) { + unsigned i; + Stack& stack = st->get_stack(); + if (var) { + VM_LOG(st) << "execute CDEPTHIX"; + i = stack.pop_smallint_range(3); + } else { + i = args & 3; + VM_LOG(st) << "execute CDEPTHI " << i; + } + auto cell = stack.pop_cell(); + stack.push_smallint(cell->get_depth(i)); + return 0; +} + void register_cell_deserialize_ops(OpcodeTable& cp0) { using namespace std::placeholders; cp0.insert(OpcodeInstr::mksimple(0xd0, 8, "CTOS", exec_cell_to_slice)) @@ -1479,7 +1528,13 @@ void register_cell_deserialize_ops(OpcodeTable& cp0) { .insert(OpcodeInstr::mksimple(0xd761, 16, "LDONES", std::bind(exec_load_same, _1, "LDONES", 1))) .insert(OpcodeInstr::mksimple(0xd762, 16, "LDSAME", std::bind(exec_load_same, _1, "LDSAME", -1))) .insert(OpcodeInstr::mksimple(0xd764, 16, "SDEPTH", exec_slice_depth)) - .insert(OpcodeInstr::mksimple(0xd765, 16, "CDEPTH", exec_cell_depth)); + .insert(OpcodeInstr::mksimple(0xd765, 16, "CDEPTH", exec_cell_depth)) + .insert(OpcodeInstr::mksimple(0xd766, 16, "CLEVEL", exec_cell_level)->require_version(6)) + 
.insert(OpcodeInstr::mksimple(0xd767, 16, "CLEVELMASK", exec_cell_level_mask)->require_version(6)) + .insert(OpcodeInstr::mkfixed(0xd768 >> 2, 14, 2, instr::dump_1c_and(3, "CHASHI "), std::bind(exec_cell_hash_i, _1, _2, false))->require_version(6)) + .insert(OpcodeInstr::mkfixed(0xd76c >> 2, 14, 2, instr::dump_1c_and(3, "CDEPTHI "), std::bind(exec_cell_depth_i, _1, _2, false))->require_version(6)) + .insert(OpcodeInstr::mksimple(0xd770, 16, "CHASHIX ", std::bind(exec_cell_hash_i, _1, 0, true))->require_version(6)) + .insert(OpcodeInstr::mksimple(0xd771, 16, "CDEPTHIX ", std::bind(exec_cell_depth_i, _1, 0, true))->require_version(6)); } void register_cell_ops(OpcodeTable& cp0) { diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index 1264dc2b..c2d026e4 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -69,12 +69,26 @@ If the parameter is absent from the config, the value is null. * **6**: `ConfigParam 43` (size limits). ### New TVM instructions + +#### Fee calculation * `GETEXECUTIONPRICE` (`gas_used is_mc - price`) - calculates gas fee. * `GETSTORAGEPRICE` (`cells bits seconds is_mc - price`) - calculates storage fees (only current StoragePrices entry is used). * `GETFORWARDPRICE` (`cells bits is_mc - price`) - calculates forward fee. * `GETPRECOMPILEDGAS` (`- null`) - reserved, currently returns `null`. + `gas_used`, `cells`, `bits`, `time_delta` are integers in range `0..2^63-1`. +#### Cell operations +Operations for working with Merkle proofs, where cells can have non-zero level and multiple hashes. +* `CLEVEL` (`cell - level`) - returns level of the cell. +* `CLEVELMASK` (`cell - level_mask`) - returns level mask of the cell. +* `i CHASHI` (`cell - hash`) - returns `i`th hash of the cell. +* `i CDEPTHI` (`cell - depth`) - returns `i`th depth of the cell. +* `CHASHIX` (`cell i - hash`) - returns `i`th hash of the cell. +* `CDEPTHIX` (`cell i - depth`) - returns `i`th depth of the cell. + +`i` is in range `0..3`. + ### Other changes * `GLOBALID` gets `ConfigParam 19` from the tuple, not from the config dict. This decreases gas usage. * `SENDMSG` gets `ConfigParam 24/25` (message prices) from the tuple, not from the config dict, and also uses `ConfigParam 43` to get max_msg_cells. 
\ No newline at end of file diff --git a/test/regression-tests.ans b/test/regression-tests.ans index 0e07e3d6..ec013bf7 100644 --- a/test/regression-tests.ans +++ b/test/regression-tests.ans @@ -18,6 +18,7 @@ Test_Fift_test_fiftext_default 2b0db5d4d4bfbc705b959cc787540d7b3a21a71469eac5475 Test_Fift_test_fixed_default 278a19d56b773102caf5c1fe2997ea6c8d0d9e720eff8503feede6398a197eec Test_Fift_test_hash_ext_default 686fc5680feca5b3bb207768215b27f6872a95128762dee0d7f2c88bc492d62d Test_Fift_test_hmap_default c269246882039824bb5822e896c3e6e82ef8e1251b6b251f5af8ea9fb8d05067 +Test_Fift_test_levels_default 9fba4a7c98aec9000f42846d6e5fd820343ba61d68f9139dd16c88ccda757cf3 Test_Fift_test_namespaces_default e6419619c51332fb5e8bf22043ef415db686c47fe24f03061e5ad831014e7c6c Test_Fift_test_rist255_default f4d7558f200a656934f986145c19b1dedbe2ad029292a5a975576d6891e25fc4 Test_Fift_test_sort2_default 9b57d47e6a10e7d1bbb565db35400debf2f963031f434742a702ec76555a5d3a From a11ffb1637032faabea9119020f6c80ed678d0e7 Mon Sep 17 00:00:00 2001 From: EmelyanenkoK Date: Mon, 29 Jan 2024 16:38:42 +0300 Subject: [PATCH 71/71] Add DUEPAYMENT and some others + small fixes of new opcodes (#881) * Changes in TVM v6 * Rename some opcodes * Add due payment to c7 * Add GETORIGINALFWDFEE, GETGASFEESIMPLE, GETFORWARDFEESIMPLE * Bugfix in GETGASFEE * Fix typo --------- Co-authored-by: SpyCheese --- crypto/block/transaction.cpp | 9 +-- crypto/fift/lib/Asm.fif | 10 ++- crypto/smc-envelope/SmartContract.cpp | 1 + crypto/vm/tonops.cpp | 92 ++++++++++++++++++++------- doc/GlobalVersions.md | 28 ++++---- utils/opcode-timing.cpp | 1 + 6 files changed, 99 insertions(+), 42 deletions(-) diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index e08407a2..b3aa6c8f 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -1339,7 +1339,8 @@ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { } if (cfg.global_version >= 6) { tuple.push_back(cfg.unpacked_config_tuple.not_null() ? vm::StackEntry(cfg.unpacked_config_tuple) - : vm::StackEntry()); // unpacked_config_tuple:[...] + : vm::StackEntry()); // unpacked_config_tuple:[...] + tuple.push_back(due_payment.not_null() ? due_payment : td::zero_refint()); // due_payment:Integer } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); LOG(DEBUG) << "SmartContractInfo initialized with " << vm::StackEntry(tuple_ref).to_string(); @@ -1938,9 +1939,9 @@ td::uint64 MsgPrices::compute_fwd_fees(td::uint64 cells, td::uint64 bits) const * @returns The computed forward fees for the message as td::RefInt256j. 
*/ td::RefInt256 MsgPrices::compute_fwd_fees256(td::uint64 cells, td::uint64 bits) const { - return td::rshift( - td::make_refint(lump_price) + td::make_refint(bit_price) * bits + td::make_refint(cell_price) * cells, 16, - 1); // divide by 2^16 with ceil rounding + return td::make_refint(lump_price) + + td::rshift(td::make_refint(bit_price) * bits + td::make_refint(cell_price) * cells, 16, + 1); // divide by 2^16 with ceil rounding } /** diff --git a/crypto/fift/lib/Asm.fif b/crypto/fift/lib/Asm.fif index 5ad6ce47..92ceab6d 100644 --- a/crypto/fift/lib/Asm.fif +++ b/crypto/fift/lib/Asm.fif @@ -1302,16 +1302,20 @@ x{F82B} @Defop INCOMINGVALUE x{F82C} @Defop STORAGEFEES x{F82D} @Defop PREVBLOCKSINFOTUPLE x{F82E} @Defop UNPACKEDCONFIGTUPLE +x{F82F} @Defop DUEPAYMENT x{F830} @Defop CONFIGDICT x{F832} @Defop CONFIGPARAM x{F833} @Defop CONFIGOPTPARAM x{F83400} @Defop PREVMCBLOCKS x{F83401} @Defop PREVKEYBLOCK x{F835} @Defop GLOBALID -x{F836} @Defop GETEXECUTIONPRICE -x{F837} @Defop GETSTORAGEPRICE -x{F838} @Defop GETFORWARDPRICE +x{F836} @Defop GETGASFEE +x{F837} @Defop GETSTORAGEFEE +x{F838} @Defop GETFORWARDFEE x{F839} @Defop GETPRECOMPILEDGAS +x{F83A} @Defop GETORIGINALFWDFEE +x{F83B} @Defop GETGASFEESIMPLE +x{F83C} @Defop GETFORWARDFEESIMPLE x{F840} @Defop GETGLOBVAR { dup 1 31 @rangechk prepare_vm_c7(SmartContract::Args args, td::Ref cod } if (args.config && args.config.value()->get_global_version() >= 6) { tuple.push_back(args.config.value()->get_unpacked_config_tuple(now)); // unpacked_config_tuple + tuple.push_back(td::zero_refint()); // due_payment } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); //LOG(DEBUG) << "SmartContractInfo initialized with " << vm::StackEntry(tuple).to_string(); diff --git a/crypto/vm/tonops.cpp b/crypto/vm/tonops.cpp index 753bbc0a..9ce7fe9c 100644 --- a/crypto/vm/tonops.cpp +++ b/crypto/vm/tonops.cpp @@ -276,11 +276,7 @@ int exec_get_global_id(VmState* st) { return 0; } -int exec_get_execution_price(VmState* st) { - VM_LOG(st) << "execute GETEXECUTIONPRICE"; - Stack& stack = st->get_stack(); - bool is_masterchain = stack.pop_bool(); - td::uint64 gas = stack.pop_long_range(std::numeric_limits::max(), 0); +static block::GasLimitsPrices get_gas_prices(VmState* st, bool is_masterchain) { Ref cs = get_unpacked_config_param(st, is_masterchain ? 2 : 3); if (cs.is_null()) { throw VmError{Excno::type_chk, "intermediate value is not a slice"}; @@ -289,13 +285,33 @@ int exec_get_execution_price(VmState* st) { if (r_prices.is_error()) { throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; } - block::GasLimitsPrices prices = r_prices.move_as_ok(); + return r_prices.move_as_ok(); +} + +static block::MsgPrices get_msg_prices(VmState* st, bool is_masterchain) { + Ref cs = get_unpacked_config_param(st, is_masterchain ? 4 : 5); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; + } + auto r_prices = block::Config::do_get_msg_prices(*cs, is_masterchain ? 
24 : 25); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + return r_prices.move_as_ok(); +} + +int exec_get_gas_fee(VmState* st) { + VM_LOG(st) << "execute GETGASFEE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 gas = stack.pop_long_range(std::numeric_limits::max(), 0); + block::GasLimitsPrices prices = get_gas_prices(st, is_masterchain); stack.push_int(prices.compute_gas_price(gas)); return 0; } -int exec_get_storage_price(VmState* st) { - VM_LOG(st) << "execute GETSTORAGEPRICE"; +int exec_get_storage_fee(VmState* st) { + VM_LOG(st) << "execute GETSTORAGEFEE"; Stack& stack = st->get_stack(); bool is_masterchain = stack.pop_bool(); td::int64 delta = stack.pop_long_range(std::numeric_limits::max(), 0); @@ -325,21 +341,13 @@ int exec_get_storage_price(VmState* st) { return 0; } -int exec_get_forward_price(VmState* st) { - VM_LOG(st) << "execute GETFORWARDPRICE"; +int exec_get_forward_fee(VmState* st) { + VM_LOG(st) << "execute GETFORWARDFEE"; Stack& stack = st->get_stack(); bool is_masterchain = stack.pop_bool(); td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); - Ref cs = get_unpacked_config_param(st, is_masterchain ? 4 : 5); - if (cs.is_null()) { - throw VmError{Excno::type_chk, "intermediate value is not a slice"}; - } - auto r_prices = block::Config::do_get_msg_prices(*cs, is_masterchain ? 24 : 25); - if (r_prices.is_error()) { - throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; - } - block::MsgPrices prices = r_prices.move_as_ok(); + block::MsgPrices prices = get_msg_prices(st, is_masterchain); stack.push_int(prices.compute_fwd_fees256(cells, bits)); return 0; } @@ -351,6 +359,41 @@ int exec_get_precompiled_gas(VmState* st) { return 0; } +int exec_get_original_fwd_fee(VmState* st) { + VM_LOG(st) << "execute GETORIGINALFWDFEE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::RefInt256 fwd_fee = stack.pop_int_finite(); + if (fwd_fee->sgn() < 0) { + throw VmError{Excno::range_chk, "fwd_fee is negative"}; + } + block::MsgPrices prices = get_msg_prices(st, is_masterchain); + stack.push_int(td::muldiv(fwd_fee, td::make_refint(1 << 16), td::make_refint((1 << 16) - prices.first_frac))); + return 0; +} + +int exec_get_gas_fee_simple(VmState* st) { + VM_LOG(st) << "execute GETGASFEESIMPLE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 gas = stack.pop_long_range(std::numeric_limits::max(), 0); + block::GasLimitsPrices prices = get_gas_prices(st, is_masterchain); + stack.push_int(td::rshift(td::make_refint(prices.gas_price) * gas, 16, 1)); + return 0; +} + +int exec_get_forward_fee_simple(VmState* st) { + VM_LOG(st) << "execute GETFORWARDFEESIMPLE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); + block::MsgPrices prices = get_msg_prices(st, is_masterchain); + stack.push_int(td::rshift(td::make_refint(prices.bit_price) * bits + td::make_refint(prices.cell_price) * cells, 16, + 1)); // divide by 2^16 with ceil rounding + return 0; +} + void register_ton_config_ops(OpcodeTable& cp0) { using namespace std::placeholders; cp0.insert(OpcodeInstr::mkfixedrange(0xf820, 0xf823, 16, 4, 
instr::dump_1c("GETPARAM "), exec_get_var_param)) @@ -366,17 +409,20 @@ void register_ton_config_ops(OpcodeTable& cp0) { .insert(OpcodeInstr::mksimple(0xf82c, 16, "STORAGEFEES", std::bind(exec_get_param, _1, 12, "STORAGEFEES"))) .insert(OpcodeInstr::mksimple(0xf82d, 16, "PREVBLOCKSINFOTUPLE", std::bind(exec_get_param, _1, 13, "PREVBLOCKSINFOTUPLE"))) .insert(OpcodeInstr::mksimple(0xf82e, 16, "UNPACKEDCONFIGTUPLE", std::bind(exec_get_param, _1, 14, "UNPACKEDCONFIGTUPLE"))) - .insert(OpcodeInstr::mkfixedrange(0xf82f, 0xf830, 16, 4, instr::dump_1c("GETPARAM "), exec_get_var_param)) + .insert(OpcodeInstr::mksimple(0xf82f, 16, "DUEPAYMENT", std::bind(exec_get_param, _1, 15, "DUEPAYMENT"))) .insert(OpcodeInstr::mksimple(0xf830, 16, "CONFIGDICT", exec_get_config_dict)) .insert(OpcodeInstr::mksimple(0xf832, 16, "CONFIGPARAM", std::bind(exec_get_config_param, _1, false))) .insert(OpcodeInstr::mksimple(0xf833, 16, "CONFIGOPTPARAM", std::bind(exec_get_config_param, _1, true))) .insert(OpcodeInstr::mksimple(0xf83400, 24, "PREVMCBLOCKS", std::bind(exec_get_prev_blocks_info, _1, 0, "PREVMCBLOCKS"))->require_version(4)) .insert(OpcodeInstr::mksimple(0xf83401, 24, "PREVKEYBLOCK", std::bind(exec_get_prev_blocks_info, _1, 1, "PREVKEYBLOCK"))->require_version(4)) .insert(OpcodeInstr::mksimple(0xf835, 16, "GLOBALID", exec_get_global_id)->require_version(4)) - .insert(OpcodeInstr::mksimple(0xf836, 16, "GETEXECUTIONPRICE", exec_get_execution_price)->require_version(6)) - .insert(OpcodeInstr::mksimple(0xf837, 16, "GETSTORAGEPRICE", exec_get_storage_price)->require_version(6)) - .insert(OpcodeInstr::mksimple(0xf838, 16, "GETFORWARDPRICE", exec_get_forward_price)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf836, 16, "GETGASFEE", exec_get_gas_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf837, 16, "GETSTORAGEFEE", exec_get_storage_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf838, 16, "GETFORWARDFEE", exec_get_forward_fee)->require_version(6)) .insert(OpcodeInstr::mksimple(0xf839, 16, "GETPRECOMPILEDGAS", exec_get_precompiled_gas)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf83a, 16, "GETORIGINALFWDFEE", exec_get_original_fwd_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf83b, 16, "GETGASFEESIMPLE", exec_get_gas_fee_simple)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf83c, 16, "GETFORWARDFEESIMPLE", exec_get_forward_fee_simple)->require_version(6)) .insert(OpcodeInstr::mksimple(0xf840, 16, "GETGLOBVAR", exec_get_global_var)) .insert(OpcodeInstr::mkfixedrange(0xf841, 0xf860, 16, 5, instr::dump_1c_and(31, "GETGLOB "), exec_get_global)) .insert(OpcodeInstr::mksimple(0xf860, 16, "SETGLOBVAR", exec_set_global_var)) diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index c2d026e4..7e3b4ce9 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -58,23 +58,27 @@ See [this post](https://t.me/tonstatus/88) for details. ## Version 6 ### c7 tuple -**c7** tuple extended from 14 to 15 elements. The new element is a tuple that contains some config parameters as cell slices. -If the parameter is absent from the config, the value is null. -* **0**: `StoragePrices` from `ConfigParam 18`. Not the whole dict, but only the one StoragePrices entry (one which corresponds to the current time). -* **1**: `ConfigParam 19` (global id). -* **2**: `ConfigParam 20` (mc gas prices). -* **3**: `ConfigParam 21` (gas prices). -* **4**: `ConfigParam 24` (mc fwd fees). -* **5**: `ConfigParam 25` (fwd fees). -* **6**: `ConfigParam 43` (size limits). 
+**c7** tuple extended from 14 to 16 elements:
+* **14**: tuple that contains some config parameters as cell slices. If the parameter is absent from the config, the value is null. Asm opcode: `UNPACKEDCONFIGTUPLE`.
+  * **0**: `StoragePrices` from `ConfigParam 18`. Not the whole dict, but only the one StoragePrices entry (one which corresponds to the current time).
+  * **1**: `ConfigParam 19` (global id).
+  * **2**: `ConfigParam 20` (mc gas prices).
+  * **3**: `ConfigParam 21` (gas prices).
+  * **4**: `ConfigParam 24` (mc fwd fees).
+  * **5**: `ConfigParam 25` (fwd fees).
+  * **6**: `ConfigParam 43` (size limits).
+* **15**: "[due payment](https://github.com/ton-blockchain/ton/blob/8a9ff339927b22b72819c5125428b70c406da631/crypto/block/block.tlb#L237)" - current debt for storage fee (nanotons). Asm opcode: `DUEPAYMENT`.

 ### New TVM instructions

 #### Fee calculation
-* `GETEXECUTIONPRICE` (`gas_used is_mc - price`) - calculates gas fee.
-* `GETSTORAGEPRICE` (`cells bits seconds is_mc - price`) - calculates storage fees (only current StoragePrices entry is used).
-* `GETFORWARDPRICE` (`cells bits is_mc - price`) - calculates forward fee.
+* `GETGASFEE` (`gas_used is_mc - price`) - calculates gas fee.
+* `GETSTORAGEFEE` (`cells bits seconds is_mc - price`) - calculates storage fees (only current StoragePrices entry is used).
+* `GETFORWARDFEE` (`cells bits is_mc - price`) - calculates forward fee.
 * `GETPRECOMPILEDGAS` (`- null`) - reserved, currently returns `null`.
+* `GETORIGINALFWDFEE` (`fwd_fee is_mc - orig_fwd_fee`) - calculates `fwd_fee * 2^16 / (2^16 - first_frac)` (see `exec_get_original_fwd_fee` above). Can be used to get the original `fwd_fee` of the message.
+* `GETGASFEESIMPLE` (`gas_used is_mc - price`) - same as `GETGASFEE`, but without flat price (just `(gas_used * price) / 2^16`).
+* `GETFORWARDFEESIMPLE` (`cells bits is_mc - price`) - same as `GETFORWARDFEE`, but without lump price (just `(bits*bit_price + cells*cell_price) / 2^16`).

 `gas_used`, `cells`, `bits`, `time_delta` are integers in range `0..2^63-1`.

diff --git a/utils/opcode-timing.cpp b/utils/opcode-timing.cpp
index d68928d0..876ba109 100644
--- a/utils/opcode-timing.cpp
+++ b/utils/opcode-timing.cpp
@@ -57,6 +57,7 @@ void prepare_c7() {
   } else {
     tuple.push_back(vm::StackEntry());
   }
+  tuple.push_back(td::zero_refint());
   auto tuple_ref = td::make_cnt_ref<std::vector<vm::StackEntry>>(std::move(tuple));
   c7 = vm::make_tuple_ref(std::move(tuple_ref));
 }
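The new fee opcodes above reduce to a few fixed-point formulas over the prices read from `ConfigParam 20/21/24/25` through the unpacked config tuple in c7. The standalone C++ sketch below restates those formulas with plain integers; it is not part of any patch in this series, the helper names are invented for illustration, the sample prices are placeholders rather than real config values, and 128-bit intermediates are assumed to be available (a GCC/Clang extension).

#include <cstdint>
#include <cstdio>

// Ceiling division by 2^16, mirroring td::rshift(x, 16, 1) used in the diffs above.
static uint64_t ceil_shr16(unsigned __int128 x) {
  return static_cast<uint64_t>((x + 0xffff) >> 16);
}

// GETFORWARDFEE: lump_price + ceil((bit_price * bits + cell_price * cells) / 2^16),
// i.e. the fixed compute_fwd_fees256 where the lump part is no longer divided.
static uint64_t forward_fee(uint64_t lump_price, uint64_t bit_price, uint64_t cell_price,
                            uint64_t cells, uint64_t bits) {
  return lump_price +
         ceil_shr16((unsigned __int128)bit_price * bits + (unsigned __int128)cell_price * cells);
}

// GETFORWARDFEESIMPLE: the same sum without the lump price.
static uint64_t forward_fee_simple(uint64_t bit_price, uint64_t cell_price,
                                   uint64_t cells, uint64_t bits) {
  return ceil_shr16((unsigned __int128)bit_price * bits + (unsigned __int128)cell_price * cells);
}

// GETGASFEESIMPLE: ceil(gas_used * gas_price / 2^16), the gas fee without the flat part.
static uint64_t gas_fee_simple(uint64_t gas_price, uint64_t gas_used) {
  return ceil_shr16((unsigned __int128)gas_price * gas_used);
}

// GETORIGINALFWDFEE: fwd_fee * 2^16 / (2^16 - first_frac), recovering the fee that was
// originally charged before the first_frac portion was deducted; requires first_frac < 2^16.
static uint64_t original_fwd_fee(uint64_t fwd_fee, uint32_t first_frac) {
  return static_cast<uint64_t>((unsigned __int128)fwd_fee * 65536 / (65536 - first_frac));
}

int main() {
  // Placeholder prices; real values come from the message-price and gas-price config records.
  uint64_t lump = 400000, bit_price = 26214400, cell_price = 2621440000;
  uint64_t fee = forward_fee(lump, bit_price, cell_price, /*cells=*/1, /*bits=*/1023);
  std::printf("forward fee:          %llu\n", (unsigned long long)fee);
  std::printf("forward fee (simple): %llu\n",
              (unsigned long long)forward_fee_simple(bit_price, cell_price, 1, 1023));
  std::printf("gas fee (simple):     %llu\n", (unsigned long long)gas_fee_simple(65536000, 10000));
  std::printf("original fwd fee:     %llu\n",
              (unsigned long long)original_fwd_fee(fee, /*first_frac=*/21845));
  return 0;
}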