diff --git a/overlay/overlay-manager.cpp b/overlay/overlay-manager.cpp index 41750b4f..43192190 100644 --- a/overlay/overlay-manager.cpp +++ b/overlay/overlay-manager.cpp @@ -90,12 +90,20 @@ void OverlayManager::delete_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdSho } void OverlayManager::create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, - std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) { + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope) { + create_public_overlay_ex(local_id, std::move(overlay_id), std::move(callback), std::move(rules), std::move(scope), + true); +} + +void OverlayManager::create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope, bool announce_self) { CHECK(!dht_node_.empty()); auto id = overlay_id.compute_short_id(); register_overlay(local_id, id, Overlay::create(keyring_, adnl_, actor_id(this), dht_node_, local_id, std::move(overlay_id), - std::move(callback), std::move(rules), scope)); + std::move(callback), std::move(rules), scope, announce_self)); } void OverlayManager::create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, diff --git a/overlay/overlay-manager.h b/overlay/overlay-manager.h index d9739bf3..fe1166ac 100644 --- a/overlay/overlay-manager.h +++ b/overlay/overlay-manager.h @@ -52,6 +52,9 @@ class OverlayManager : public Overlays { void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) override; + void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, + std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope, + bool announce_self) override; void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, 
OverlayPrivacyRules rules) override; diff --git a/overlay/overlay-peers.cpp b/overlay/overlay-peers.cpp index 174dbdce..409f0993 100644 --- a/overlay/overlay-peers.cpp +++ b/overlay/overlay-peers.cpp @@ -171,7 +171,9 @@ void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::Result promise) { std::vector> vec; - vec.emplace_back(node.tl()); + if (announce_self_) { + vec.emplace_back(node.tl()); + } for (td::uint32 i = 0; i < nodes_to_send(); i++) { auto P = get_random_peer(true); diff --git a/overlay/overlay.cpp b/overlay/overlay.cpp index 4c59079f..a562beb6 100644 --- a/overlay/overlay.cpp +++ b/overlay/overlay.cpp @@ -37,10 +37,10 @@ td::actor::ActorOwn Overlay::create(td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope) { + OverlayPrivacyRules rules, td::string scope, bool announce_self) { auto R = td::actor::create_actor("overlay", keyring, adnl, manager, dht_node, local_id, std::move(overlay_id), true, std::vector(), - std::move(callback), std::move(rules), scope); + std::move(callback), std::move(rules), scope, announce_self); return td::actor::ActorOwn(std::move(R)); } @@ -60,7 +60,7 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope) + OverlayPrivacyRules rules, td::string scope, bool announce_self) : keyring_(keyring) , adnl_(adnl) , manager_(manager) @@ -70,7 +70,8 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor , callback_(std::move(callback)) , public_(pub) , rules_(std::move(rules)) - , scope_(scope) { + , scope_(scope) + , announce_self_(announce_self) { overlay_id_ = id_full_.compute_short_id(); VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? 
"public" : "private"); @@ -328,6 +329,10 @@ void OverlayImpl::receive_dht_nodes(td::Result res, bool dummy) { VLOG(OVERLAY_NOTICE) << this << ": can not get value from DHT: " << res.move_as_error(); } + if (!announce_self_) { + return; + } + VLOG(OVERLAY_INFO) << this << ": adding self node to DHT overlay's nodes"; auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), oid = print_id()](td::Result R) { if (R.is_error()) { diff --git a/overlay/overlay.h b/overlay/overlay.h index 51eab1b4..a5f7b3a4 100644 --- a/overlay/overlay.h +++ b/overlay/overlay.h @@ -42,7 +42,7 @@ class Overlay : public td::actor::Actor { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope); + OverlayPrivacyRules rules, td::string scope, bool announce_self = true); static td::actor::ActorOwn create(td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId manager, diff --git a/overlay/overlay.hpp b/overlay/overlay.hpp index 344ce1a0..86d37d5b 100644 --- a/overlay/overlay.hpp +++ b/overlay/overlay.hpp @@ -124,7 +124,7 @@ class OverlayImpl : public Overlay { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }"); + OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool announce_self = true); void update_dht_node(td::actor::ActorId dht) override { dht_node_ = dht; } @@ -366,6 +366,7 @@ class OverlayImpl : public Overlay { bool semi_public_ = false; OverlayPrivacyRules rules_; td::string scope_; + bool announce_self_ = true; std::map> certs_; class CachedEncryptor : public td::ListNode { diff --git a/overlay/overlays.h b/overlay/overlays.h index 45316254..e12bbbdb 100644 --- a/overlay/overlays.h +++ b/overlay/overlays.h @@ 
-193,7 +193,11 @@ class Overlays : public td::actor::Actor { virtual void update_dht_node(td::actor::ActorId dht) = 0; virtual void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, - std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) = 0; + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope) = 0; + virtual void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, + std::unique_ptr callback, OverlayPrivacyRules rules, + td::string scope, bool announce_self) = 0; virtual void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) = 0; diff --git a/rldp-http-proxy/CMakeLists.txt b/rldp-http-proxy/CMakeLists.txt index 7ba66ccb..eefd1dd8 100644 --- a/rldp-http-proxy/CMakeLists.txt +++ b/rldp-http-proxy/CMakeLists.txt @@ -1,5 +1,5 @@ cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) -add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h TonlibClient.h TonlibClient.cpp DNSResolver.cpp) +add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h DNSResolver.cpp) target_include_directories(rldp-http-proxy PUBLIC $) target_link_libraries(rldp-http-proxy PRIVATE tonhttp rldp dht tonlib git) diff --git a/rldp-http-proxy/DNSResolver.cpp b/rldp-http-proxy/DNSResolver.cpp index 1fb197a5..1a5705d3 100644 --- a/rldp-http-proxy/DNSResolver.cpp +++ b/rldp-http-proxy/DNSResolver.cpp @@ -30,7 +30,8 @@ static const double CACHE_TIMEOUT_HARD = 300.0; static const double CACHE_TIMEOUT_SOFT = 270.0; -DNSResolver::DNSResolver(td::actor::ActorId tonlib_client) : tonlib_client_(std::move(tonlib_client)) { +DNSResolver::DNSResolver(td::actor::ActorId tonlib_client) + : tonlib_client_(std::move(tonlib_client)) { } void DNSResolver::start_up() { @@ -39,14 +40,15 @@ void DNSResolver::start_up() { void DNSResolver::sync() { auto obj = tonlib_api::make_object(); - auto P = 
td::PromiseCreator::lambda([SelfId = - actor_id(this)](td::Result> R) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)]( + td::Result> R) { if (R.is_error()) { LOG(WARNING) << "Sync error: " << R.move_as_error(); ton::delay_action([SelfId]() { td::actor::send_closure(SelfId, &DNSResolver::sync); }, td::Timestamp::in(5.0)); } }); - td::actor::send_closure(tonlib_client_, &TonlibClient::send_request, std::move(obj), std::move(P)); + td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request, std::move(obj), + std::move(P)); } void DNSResolver::resolve(std::string host, td::Promise promise) { @@ -66,18 +68,13 @@ void DNSResolver::resolve(std::string host, td::Promise(nullptr, host, category, 16); auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), promise = std::move(promise), host = std::move(host)]( - td::Result> R) mutable { + td::Result> R) mutable { if (R.is_error()) { if (promise) { promise.set_result(R.move_as_error()); } } else { - auto v = R.move_as_ok(); - auto obj = dynamic_cast(v.get()); - if (obj == nullptr) { - promise.set_result(td::Status::Error("invalid response from tonlib")); - return; - } + auto obj = R.move_as_ok(); ton::adnl::AdnlNodeIdShort id; td::uint32 cnt = 0; for (auto &e : obj->entries_) { @@ -106,7 +103,8 @@ void DNSResolver::resolve(std::string host, td::Promise, + std::move(obj), std::move(P)); } void DNSResolver::save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id) { diff --git a/rldp-http-proxy/DNSResolver.h b/rldp-http-proxy/DNSResolver.h index ba52a67e..6b9d31da 100644 --- a/rldp-http-proxy/DNSResolver.h +++ b/rldp-http-proxy/DNSResolver.h @@ -25,13 +25,13 @@ */ #pragma once #include "td/actor/actor.h" -#include "TonlibClient.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" #include "adnl/adnl.h" #include "td/actor/PromiseFuture.h" class DNSResolver : public td::actor::Actor { public: - explicit DNSResolver(td::actor::ActorId tonlib_client); + explicit 
DNSResolver(td::actor::ActorId tonlib_client); void start_up() override; void resolve(std::string host, td::Promise promise); @@ -40,7 +40,7 @@ class DNSResolver : public td::actor::Actor { void sync(); void save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id); - td::actor::ActorId tonlib_client_; + td::actor::ActorId tonlib_client_; struct CacheEntry { ton::adnl::AdnlNodeIdShort id_; diff --git a/rldp-http-proxy/rldp-http-proxy.cpp b/rldp-http-proxy/rldp-http-proxy.cpp index 1c76651f..9b9e958f 100644 --- a/rldp-http-proxy/rldp-http-proxy.cpp +++ b/rldp-http-proxy/rldp-http-proxy.cpp @@ -54,7 +54,7 @@ #include "td/utils/BufferedFd.h" #include "common/delay.h" -#include "TonlibClient.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" #include "DNSResolver.h" #if TD_DARWIN || TD_LINUX @@ -919,7 +919,7 @@ class RldpHttpProxy : public td::actor::Actor { auto tonlib_options = tonlib_api::make_object( tonlib_api::make_object(conf_dataR.move_as_ok().as_slice().str(), "", false, false), tonlib_api::make_object()); - tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); + tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); dns_resolver_ = td::actor::create_actor("dnsresolver", tonlib_client_.get()); } @@ -1315,7 +1315,7 @@ class RldpHttpProxy : public td::actor::Actor { std::string db_root_ = "."; bool proxy_all_ = false; - td::actor::ActorOwn tonlib_client_; + td::actor::ActorOwn tonlib_client_; td::actor::ActorOwn dns_resolver_; std::maptransfer_id, std::move(error)); } else { - LOG(ERROR) << "Timeout on unknown transfer " << limit->transfer_id.to_hex(); + VLOG(RLDP_WARNING) << "Timeout on unknown transfer " << limit->transfer_id.to_hex(); } } limits_set_.erase(*limit); @@ -113,7 +114,7 @@ void RldpConnection::send(TransferId transfer_id, td::BufferSlice data, td::Time td::Random::secure_bytes(transfer_id.as_slice()); } else { if (outbound_transfers_.find(transfer_id) != outbound_transfers_.end()) { 
- LOG(WARNING) << "Skip resend of " << transfer_id.to_hex(); + VLOG(RLDP_WARNING) << "Skip resend of " << transfer_id.to_hex(); return; } } @@ -143,17 +144,6 @@ void RldpConnection::loop_bbr(td::Timestamp now) { double speed = bbr_.get_rate(); td::uint32 congestion_window = bbr_.get_window_size(); - static td::Timestamp next; - //FIXME: remove this UNSAFE debug output - if (next.is_in_past(now)) { - next = td::Timestamp::in(1, now); - if (td::actor::core::ActorExecuteContext::get()->actor().get_actor_info_ptr()->get_name() == "Alice") { - LOG(ERROR) << "speed=" << td::format::as_size((td::int64)speed * 768) << " " - << "cgw=" << td::format::as_size((td::int64)congestion_window * 768) << " " - << "loss=" << loss_stats_.loss * 100 << "%"; - } - } - pacer_.set_speed(speed); congestion_window_ = congestion_window; } @@ -301,7 +291,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) { max_size = limit_it->max_size; } if (total_size > max_size) { - LOG(INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size; + VLOG(RLDP_INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size; return; } @@ -324,7 +314,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) { } return {}; } - if (in_part->receiver.on_received(part.seqno_, td::Timestamp::now())) { + if (in_part->receiver.on_received(part.seqno_ + 1, td::Timestamp::now())) { TRY_STATUS_PREFIX(in_part->decoder->add_symbol({static_cast(part.seqno_), std::move(part.data_)}), td::Status::Error(ErrorCode::protoviolation, "invalid symbol")); if (in_part->decoder->may_try_decode()) { diff --git a/rldp2/RldpReceiver.cpp b/rldp2/RldpReceiver.cpp index 169bd551..cf0136c2 100644 --- a/rldp2/RldpReceiver.cpp +++ b/rldp2/RldpReceiver.cpp @@ -33,9 +33,8 @@ void RldpReceiver::on_ack_sent(td::Timestamp now) { //LOG(ERROR) << "RESEND ACK " << cnt_; } cnt_++; - if (cnt_ > 7) { - send_ack_at_ = {}; - } else { + send_ack_at_ = {}; + if (cnt_ <= 7) { 
send_ack_at_.relax(td::Timestamp::at(now.at() + config_.ack_delay * (1 << cnt_))); } } diff --git a/rldp2/RldpSender.cpp b/rldp2/RldpSender.cpp index 1f2dc26a..00c71956 100644 --- a/rldp2/RldpSender.cpp +++ b/rldp2/RldpSender.cpp @@ -49,7 +49,6 @@ SenderPackets::Update RldpSender::on_ack(const Ack &ack, double ack_delay, td::T //LOG(ERROR) << "ON ACK " << ack.max_seqno << " " << ack.received_mask << " " << ack.received_count; auto update = packets_.on_ack(ack); if (!update.was_max_updated) { - CHECK(!update.new_received); return update; } diff --git a/rldp2/RttStats.cpp b/rldp2/RttStats.cpp index 8269cfe4..2c3e750d 100644 --- a/rldp2/RttStats.cpp +++ b/rldp2/RttStats.cpp @@ -18,17 +18,18 @@ */ #include "RttStats.h" +#include "rldp.hpp" #include namespace ton { namespace rldp2 { void RttStats::on_rtt_sample(double rtt_sample, double ack_delay, td::Timestamp now) { if (rtt_sample < 0.001 || rtt_sample > 10) { - LOG(WARNING) << "Suspicious rtt sample " << rtt_sample; + VLOG(RLDP_WARNING) << "Suspicious rtt sample " << rtt_sample; return; } if (ack_delay < -1e-9 || ack_delay > 10) { - LOG(WARNING) << "Suspicious ack_delay " << ack_delay; + VLOG(RLDP_WARNING) << "Suspicious ack_delay " << ack_delay; return; } rtt_sample = td::max(0.01, rtt_sample); diff --git a/storage/Bitset.h b/storage/Bitset.h index 55380742..2c88bc6a 100644 --- a/storage/Bitset.h +++ b/storage/Bitset.h @@ -53,13 +53,28 @@ struct Bitset { } auto mask = 1 << bit_i; if ((bits_[i] & mask) == 0) { - bits_[i] |= mask; + bits_[i] |= (char)mask; count_++; return true; } return false; } + bool set_zero(size_t offset) { + auto i = offset / 8; + if (i >= bits_.size()) { + return false; + } + auto bit_i = offset % 8; + auto mask = 1 << bit_i; + if (bits_[i] & mask) { + bits_[i] &= (char)~mask; + count_--; + return true; + } + return false; + } + size_t ones_count() const { return count_; } diff --git a/storage/CMakeLists.txt b/storage/CMakeLists.txt index 0e5cf085..a53a42b8 100644 --- a/storage/CMakeLists.txt 
+++ b/storage/CMakeLists.txt @@ -17,6 +17,7 @@ set(STORAGE_SOURCE TorrentInfo.cpp TorrentMeta.cpp + db.h Bitset.h LoadSpeed.h MerkleTree.h @@ -24,32 +25,33 @@ set(STORAGE_SOURCE PartsHelper.h PeerActor.h PeerState.h - SharedState.h Torrent.h TorrentCreator.h TorrentHeader.h TorrentInfo.h TorrentMeta.h -) - + PeerManager.h + MicrochunkTree.h MicrochunkTree.cpp) set(STORAGE_CLI_SOURCE storage-cli.cpp ) add_library(storage ${STORAGE_SOURCE}) target_link_libraries(storage tdutils tdactor tddb ton_crypto tl_api ${JEMALLOC_LIBRARIES}) -target_include_directories(storage PUBLIC - $ +target_include_directories(storage PUBLIC + $ ) add_executable(storage-cli ${STORAGE_CLI_SOURCE}) target_link_libraries(storage-cli storage overlay tdutils tdactor adnl tl_api dht - rldp rldp2 catchain validatorsession full-node validator ton_validator validator - fift-lib memprof terminal git ${JEMALLOC_LIBRARIES}) + rldp rldp2 fift-lib memprof terminal git ${JEMALLOC_LIBRARIES}) set(STORAGE_TEST_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/test/storage.cpp PARENT_SCOPE ) + +add_subdirectory(storage-daemon) + # Do not install it yet #install(TARGETS storage-cli RUNTIME DESTINATION bin) diff --git a/storage/LoadSpeed.cpp b/storage/LoadSpeed.cpp index c2fcfcd4..ddba2b85 100644 --- a/storage/LoadSpeed.cpp +++ b/storage/LoadSpeed.cpp @@ -29,7 +29,7 @@ void LoadSpeed::add(std::size_t size, td::Timestamp now) { } double LoadSpeed::speed(td::Timestamp now) const { update(now); - return total_size_ / duration(); + return (double)total_size_ / duration(now); } td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) { @@ -37,15 +37,15 @@ td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) { } void LoadSpeed::update(td::Timestamp now) const { - while (duration() > 60) { + while (duration(now) > 30) { total_size_ -= events_.front().size; events_.pop(); } } -double LoadSpeed::duration() const { +double LoadSpeed::duration(td::Timestamp now) const { double res = 5; - if 
(events_.size() > 1) { - res = std::max(res, events_.back().at.at() - events_.front().at.at()); + if (!events_.empty()) { + res = std::max(res, now.at() - events_.front().at.at()); } return res; } diff --git a/storage/LoadSpeed.h b/storage/LoadSpeed.h index d936f541..92d947f7 100644 --- a/storage/LoadSpeed.h +++ b/storage/LoadSpeed.h @@ -26,19 +26,19 @@ namespace ton { class LoadSpeed { public: - void add(std::size_t size, td::Timestamp now); + void add(td::uint64 size, td::Timestamp now = td::Timestamp::now()); double speed(td::Timestamp now = td::Timestamp::now()) const; friend td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed); private: struct Event { - std::size_t size; + td::uint64 size; td::Timestamp at; }; mutable td::VectorQueue events_; - mutable std::size_t total_size_{0}; + mutable td::uint64 total_size_{0}; - double duration() const; + double duration(td::Timestamp now) const; void update(td::Timestamp now) const; }; } // namespace ton diff --git a/storage/MerkleTree.cpp b/storage/MerkleTree.cpp index d35a1f39..2ba4aeb8 100644 --- a/storage/MerkleTree.cpp +++ b/storage/MerkleTree.cpp @@ -28,152 +28,51 @@ #include "vm/excno.hpp" namespace ton { -static td::Ref unpack_proof(td::Ref root) { +static td::Result> unpack_proof(td::Ref root) { vm::CellSlice cs(vm::NoVm(), root); - CHECK(cs.special_type() == vm::Cell::SpecialType::MerkleProof); + if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) { + return td::Status::Error("Not a merkle proof"); + } return cs.fetch_ref(); } -td::uint32 MerkleTree::get_depth() const { - return log_n_; -} -td::Ref MerkleTree::get_root(size_t depth_limit) const { - if (depth_limit > log_n_ || root_proof_.is_null()) { - return root_proof_; +MerkleTree::MerkleTree(size_t pieces_count, td::Bits256 root_hash) + : pieces_count_(pieces_count), root_hash_(root_hash) { + depth_ = 0; + n_ = 1; + while (n_ < pieces_count_) { + ++depth_; + n_ <<= 1; } - - auto usage_tree = std::make_shared(); - auto 
root_raw = vm::MerkleProof::virtualize(root_proof_, 1); - auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); - do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_), depth_limit); - auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); - CHECK(res.not_null()); - return res; } -void MerkleTree::do_gen_proof(td::Ref node, td::Ref node_raw, size_t depth_limit) const { - if (depth_limit == 0) { - return; +static td::Ref build_tree(td::Bits256 *hashes, size_t len) { + if (len == 1) { + return vm::CellBuilder().store_bytes(hashes[0].as_slice()).finalize(); } - // check if it is possible to load node without breaking virtualization - vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw)); - if (cs_raw.is_special()) { - return; + td::Ref l = build_tree(hashes, len / 2); + td::Ref r = build_tree(hashes + len / 2, len / 2); + return vm::CellBuilder().store_ref(l).store_ref(r).finalize(); +}; + +MerkleTree::MerkleTree(std::vector hashes) : pieces_count_(hashes.size()) { + depth_ = 0; + n_ = 1; + while (n_ < pieces_count_) { + ++depth_; + n_ <<= 1; + } + hashes.resize(n_, td::Bits256::zero()); + td::Ref root = build_tree(hashes.data(), n_); + root_hash_ = root->get_hash().bits(); + root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root)); +} + +static td::Status do_validate_proof(td::Ref node, size_t depth) { + if (node->get_depth(0) != depth) { + return td::Status::Error("Depth mismatch"); } vm::CellSlice cs(vm::NoVm(), std::move(node)); - while (cs.have_refs()) { - do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1); - } -} - -td::Bits256 MerkleTree::get_root_hash() const { - CHECK(root_hash_); - return root_hash_.value(); -} - -MerkleTree::MerkleTree(size_t chunks_count, td::Bits256 root_hash) { - init_begin(chunks_count); - root_hash_ = root_hash; - init_finish(); -} - -MerkleTree::MerkleTree(size_t chunks_count, td::Ref root_proof) { - init_begin(chunks_count); - root_hash_ = 
unpack_proof(root_proof)->get_hash(0).as_array(); - root_proof_ = std::move(root_proof); - init_finish(); -} - -MerkleTree::MerkleTree(td::Span chunks) { - init_begin(chunks.size()); - - for (size_t i = 0; i < chunks.size(); i++) { - CHECK(chunks[i].index == i); - init_add_chunk(i, chunks[i].hash.as_slice()); - } - - init_finish(); -} - -void MerkleTree::init_begin(size_t chunks_count) { - log_n_ = 0; - while ((size_t(1) << log_n_) < chunks_count) { - log_n_++; - } - n_ = size_t(1) << log_n_; - total_blocks_ = chunks_count; - mark_.resize(n_ * 2); - proof_.resize(n_ * 2); - - td::UInt256 null{}; - auto cell = vm::CellBuilder().store_bytes(null.as_slice()).finalize(); - for (auto i = chunks_count; i < n_; i++) { - proof_[i + n_] = cell; - } -} - -void MerkleTree::init_add_chunk(size_t index, td::Slice hash) { - CHECK(index < total_blocks_); - CHECK(proof_[index + n_].is_null()); - proof_[index + n_] = vm::CellBuilder().store_bytes(hash).finalize(); -} - -void MerkleTree::init_finish() { - for (size_t i = n_ - 1; i >= 1; i--) { - auto j = i * 2; - if (proof_[j].is_null()) { - continue; - } - if (i + 1 < n_ && proof_[i + 1].not_null() && proof_[j]->get_hash() == proof_[j + 2]->get_hash() && - proof_[j + 1]->get_hash() == proof_[j + 3]->get_hash()) { - // minor optimization for same chunks - proof_[i] = proof_[i + 1]; - } else { - proof_[i] = vm::CellBuilder().store_ref(proof_[j]).store_ref(proof_[j + 1]).finalize(); - } - } - if (proof_[1].not_null()) { - init_proof(); - } - CHECK(root_hash_); -} - -void MerkleTree::remove_chunk(std::size_t index) { - CHECK(index < n_); - index += n_; - while (proof_[index].not_null()) { - proof_[index] = {}; - index /= 2; - } -} - -bool MerkleTree::has_chunk(std::size_t index) const { - CHECK(index < n_); - index += n_; - return proof_[index].not_null(); -} - -void MerkleTree::add_chunk(std::size_t index, td::Slice hash) { - CHECK(hash.size() == 32); - CHECK(index < n_); - index += n_; - auto cell = 
vm::CellBuilder().store_bytes(hash).finalize(); - CHECK(proof_[index].is_null()); - proof_[index] = std::move(cell); - mark_[index] = mark_id_; - for (index /= 2; index != 0; index /= 2) { - CHECK(proof_[index].is_null()); - auto &left = proof_[index * 2]; - auto &right = proof_[index * 2 + 1]; - if (left.not_null() && right.not_null()) { - proof_[index] = vm::CellBuilder().store_ref(left).store_ref(right).finalize(); - mark_[index] = mark_id_; - } - } -} - -static td::Status do_validate(td::Ref ref, size_t depth) { - vm::CellSlice cs(vm::NoVm(), std::move(ref)); if (cs.is_special()) { if (cs.special_type() != vm::Cell::SpecialType::PrunnedBranch) { return td::Status::Error("Unexpected special cell"); @@ -194,154 +93,65 @@ static td::Status do_validate(td::Ref ref, size_t depth) { if (cs.size_refs() != 2) { return td::Status::Error("Node in proof must have two refs"); } - TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1)); - TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1)); + TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1)); + TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1)); } return td::Status::OK(); } -td::Status MerkleTree::validate_proof(td::Ref new_root) { - // 1. depth <= log_n - // 2. each non special node has two refs and nothing else - // 3. each list contains only hash - // 4. 
all special nodes are merkle proofs - vm::CellSlice cs(vm::NoVm(), new_root); - if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) { - return td::Status::Error("Proof must be a mekle proof cell"); +td::Status MerkleTree::add_proof(td::Ref proof) { + if (proof.is_null()) { + return td::Status::OK(); } - auto root = cs.fetch_ref(); - if (root_hash_ && root->get_hash(0).as_slice() != root_hash_.value().as_slice()) { - return td::Status::Error("Proof has invalid root hash"); + TRY_RESULT(proof_raw, unpack_proof(proof)); + if (root_hash_ != proof_raw->get_hash(0).bits()) { + return td::Status::Error("Root hash mismatch"); } - return do_validate(std::move(root), log_n_); -} - -td::Status MerkleTree::add_proof(td::Ref new_root) { - CHECK(root_proof_.not_null() || root_hash_); - TRY_STATUS(validate_proof(new_root)); - if (root_proof_.not_null()) { - auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(new_root)); + TRY_STATUS(do_validate_proof(proof_raw, depth_)); + if (root_proof_.is_null()) { + root_proof_ = std::move(proof); + } else { + auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(proof)); if (combined.is_null()) { return td::Status::Error("Can't combine proofs"); } root_proof_ = std::move(combined); - } else { - root_proof_ = std::move(new_root); } return td::Status::OK(); } -td::Status MerkleTree::validate_existing_chunk(const Chunk &chunk) { - vm::CellSlice cs(vm::NoVm(), proof_[chunk.index + n_]); - CHECK(cs.size() == chunk.hash.size()); - if (cs.as_bitslice().compare(chunk.hash.cbits()) != 0) { - return td::Status::Error("Hash mismatch"); +td::Result MerkleTree::get_piece_hash(size_t idx) const { + if (idx >= n_) { + return td::Status::Error("Index is too big"); } - return td::Status::OK(); -} - -td::Status MerkleTree::try_add_chunks(td::Span chunks) { - td::Bitset bitmask; - add_chunks(chunks, bitmask); - for (size_t i = 0; i < chunks.size(); i++) { - if (!bitmask.get(i)) { - return td::Status::Error(PSLICE() << 
"Invalid chunk #" << chunks[i].index); - } - } - return td::Status::OK(); -} - -void MerkleTree::add_chunks(td::Span chunks, td::Bitset &bitmask) { if (root_proof_.is_null()) { - return; + return td::Status::Error("Hash is not known"); } - - mark_id_++; - bitmask.reserve(chunks.size()); - for (size_t i = 0; i < chunks.size(); i++) { - const auto &chunk = chunks[i]; - if (has_chunk(chunk.index)) { - if (validate_existing_chunk(chunk).is_ok()) { - bitmask.set_one(i); - } - continue; + size_t l = 0, r = n_ - 1; + td::Ref node = unpack_proof(root_proof_).move_as_ok(); + while (true) { + vm::CellSlice cs(vm::NoVm(), std::move(node)); + if (cs.is_special()) { + return td::Status::Error("Hash is not known"); } - add_chunk(chunk.index, chunk.hash.as_slice()); - } - - root_proof_ = vm::CellBuilder::create_merkle_proof(merge(unpack_proof(root_proof_), 1)); - - for (size_t i = 0; i < chunks.size(); i++) { - const auto &chunk = chunks[i]; - if (has_chunk(chunk.index) && mark_[chunk.index + n_] == mark_id_) { - bitmask.set_one(i); + if (l == r) { + td::Bits256 hash; + CHECK(cs.fetch_bits_to(hash.bits(), 256)); + return hash; } - } -} - -td::Ref MerkleTree::merge(td::Ref root, size_t index) { - const auto &down = proof_[index]; - if (down.not_null()) { - if (down->get_hash() != root->get_hash(0)) { - proof_[index] = {}; + CHECK(cs.size_refs() == 2); + size_t mid = (l + r) / 2; + if (idx <= mid) { + node = cs.prefetch_ref(0); + r = mid; } else { - return down; + node = cs.prefetch_ref(1); + l = mid + 1; } } - - if (mark_[index] != mark_id_ || index >= n_) { - return root; - } - - vm::CellSlice cs(vm::NoVm(), root); - if (cs.is_special()) { - cleanup_add(index); - return root; - } - - CHECK(cs.size_refs() == 2); - vm::CellBuilder cb; - cb.store_bits(cs.fetch_bits(cs.size())); - auto left = merge(cs.fetch_ref(), index * 2); - auto right = merge(cs.fetch_ref(), index * 2 + 1); - cb.store_ref(std::move(left)).store_ref(std::move(right)); - return cb.finalize(); } -void 
MerkleTree::cleanup_add(size_t index) { - if (mark_[index] != mark_id_) { - return; - } - proof_[index] = {}; - if (index >= n_) { - return; - } - cleanup_add(index * 2); - cleanup_add(index * 2 + 1); -} - -void MerkleTree::init_proof() { - CHECK(proof_[1].not_null()); - td::Bits256 new_root_hash = proof_[1]->get_hash(0).as_array(); - CHECK(!root_hash_ || root_hash_.value() == new_root_hash); - root_hash_ = new_root_hash; - root_proof_ = vm::CellBuilder::create_merkle_proof(proof_[1]); -} - -td::Result> MerkleTree::gen_proof(size_t l, size_t r) { - if (root_proof_.is_null()) { - return td::Status::Error("got no proofs yet"); - } - auto usage_tree = std::make_shared(); - auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); - auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); - TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r))); - auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); - CHECK(res.not_null()); - return res; -} - -td::Status MerkleTree::do_gen_proof(td::Ref node, size_t il, size_t ir, size_t l, size_t r) const { +static td::Status do_gen_proof(td::Ref node, size_t il, size_t ir, size_t l, size_t r) { if (ir < l || il > r) { return td::Status::OK(); } @@ -358,4 +168,114 @@ td::Status MerkleTree::do_gen_proof(td::Ref node, size_t il, size_t ir TRY_STATUS(do_gen_proof(cs.fetch_ref(), ic + 1, ir, l, r)); return td::Status::OK(); } + +td::Result> MerkleTree::gen_proof(size_t l, size_t r) const { + if (root_proof_.is_null()) { + return td::Status::Error("Got no proofs yet"); + } + auto usage_tree = std::make_shared(); + auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); + auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); + TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r))); + auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); + CHECK(res.not_null()); + return res; +} + +static void do_gen_proof(td::Ref node, td::Ref 
node_raw, size_t depth_limit) { + if (depth_limit == 0) { + return; + } + // check if it is possible to load node without breaking virtualization + vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw)); + if (cs_raw.is_special()) { + return; + } + vm::CellSlice cs(vm::NoVm(), std::move(node)); + while (cs.have_refs()) { + do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1); + } +} + +td::Ref MerkleTree::get_root(size_t depth_limit) const { + if (depth_limit > depth_ || root_proof_.is_null()) { + return root_proof_; + } + auto usage_tree = std::make_shared(); + auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1); + auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr()); + do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_).move_as_ok(), depth_limit); + auto res = vm::MerkleProof::generate(root_raw, usage_tree.get()); + CHECK(res.not_null()); + return res; +} + +static td::Ref build_from_hashes(std::pair *p, std::pair *pend, + size_t len) { + if (len == 1) { + return vm::CellBuilder().store_bytes((p < pend ? p->second : td::Bits256::zero()).as_slice()).finalize(); + } + td::Ref l = build_from_hashes(p, pend, len / 2); + td::Ref r = build_from_hashes(p + len / 2, pend, len / 2); + return vm::CellBuilder().store_ref(l).store_ref(r).finalize(); +} + +td::Ref MerkleTree::do_add_pieces(td::Ref node, std::vector &ok_pieces, size_t il, + size_t ir, std::pair *pl, + std::pair *pr) { + if (pl == pr || il >= pieces_count_) { + return node; + } + vm::CellSlice cs; + if (node.is_null() || (cs = vm::CellSlice(vm::NoVm(), node)).is_special() || il + 1 == ir) { + if ((size_t)(pr - pl) != std::min(ir, pieces_count_) - il) { + return node; + } + td::Ref new_node = build_from_hashes(pl, pr, ir - il); + td::Bits256 new_hash = new_node->get_hash().bits(); + if (new_hash != (node.is_null() ? 
root_hash_ : node->get_hash(0).bits())) { + return node; + } + for (auto p = pl; p != pr; ++p) { + ok_pieces.push_back(p->first); + } + if (node.is_null() || cs.is_special()) { + node = std::move(new_node); + } + return node; + } + size_t imid = (il + ir) / 2; + auto pmid = pl; + while (pmid != pr && pmid->first < imid) { + ++pmid; + } + td::Ref l = do_add_pieces(cs.prefetch_ref(0), ok_pieces, il, imid, pl, pmid); + td::Ref r = do_add_pieces(cs.prefetch_ref(1), ok_pieces, imid, ir, pmid, pr); + if (l != cs.prefetch_ref(0) || r != cs.prefetch_ref(1)) { + node = vm::CellBuilder().store_ref(l).store_ref(r).finalize(); + } + return node; +} + +std::vector MerkleTree::add_pieces(std::vector> pieces) { + if (pieces.empty()) { + return {}; + } + std::sort(pieces.begin(), pieces.end()); + for (size_t i = 0; i + 1 < pieces.size(); ++i) { + CHECK(pieces[i].first != pieces[i + 1].first); + } + CHECK(pieces.back().first < pieces_count_); + std::vector ok_pieces; + td::Ref root; + if (!root_proof_.is_null()) { + root = unpack_proof(root_proof_).move_as_ok(); + } + root = do_add_pieces(root, ok_pieces, 0, n_, pieces.data(), pieces.data() + pieces.size()); + if (!root.is_null()) { + root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root)); + } + return ok_pieces; +} + } // namespace ton diff --git a/storage/MerkleTree.h b/storage/MerkleTree.h index 6f7be17d..cf080c40 100644 --- a/storage/MerkleTree.h +++ b/storage/MerkleTree.h @@ -24,6 +24,7 @@ #include "vm/cells.h" #include "Bitset.h" +#include namespace ton { // merkle_node$_ {n:#} left:^(ton::MerkleTree n) right:^(ton::MerkleTree n) = ton::MerkleTree (n + 1); @@ -31,66 +32,33 @@ namespace ton { class MerkleTree { public: - td::uint32 get_depth() const; - td::Ref get_root(size_t depth_limit = std::numeric_limits::max()) const; - td::Bits256 get_root_hash() const; - - MerkleTree(size_t chunks_count, td::Bits256 root_hash); - MerkleTree(size_t chunks_count, td::Ref root_proof); - - struct Chunk { - std::size_t 
index{0}; - td::Bits256 hash; - }; - - explicit MerkleTree(td::Span chunks); - MerkleTree() = default; - void init_begin(size_t chunks_count); - void init_add_chunk(std::size_t index, td::Slice hash); - void init_finish(); + MerkleTree(size_t pieces_count, td::Bits256 root_hash); + explicit MerkleTree(std::vector hashes); - // merge external proof with an existing proof - td::Status add_proof(td::Ref new_root); - // generate proof for all chunks from l to r inclusive - td::Result> gen_proof(size_t l, size_t r); + td::Status add_proof(td::Ref proof); + td::Result get_piece_hash(size_t idx) const; + td::Result> gen_proof(size_t l, size_t r) const; + td::Ref get_root(size_t depth_limit = std::numeric_limits::max()) const; - // Trying to add and validate list of chunks simultaniously - td::Status try_add_chunks(td::Span chunks); + std::vector add_pieces(std::vector> pieces); - // Returns bitmask of successfully added chunks - // Intended to be used during validation of a torrent. - // We got arbitrary chunks read from disk, and we got an arbirary proof. - // Now we can say about some chunks that they are correct. This ia a general way - // to do this. - // - // NB: already added chunks are simply validated. 
One should be careful - // not to process them twice - void add_chunks(td::Span chunks, td::Bitset &bitmask); + size_t get_depth() const { + return depth_; + } + + td::Bits256 get_root_hash() const { + return root_hash_; + } private: - td::uint64 total_blocks_; - std::size_t n_; // n = 2^log_n - td::uint32 log_n_; - std::size_t mark_id_{0}; - std::vector mark_; // n_ * 2 - std::vector> proof_; // n_ * 2 - - td::optional root_hash_; + size_t pieces_count_{0}; + td::Bits256 root_hash_ = td::Bits256::zero(); + size_t depth_{0}, n_{1}; td::Ref root_proof_; - td::Status validate_proof(td::Ref new_root); - bool has_chunk(std::size_t index) const; - void remove_chunk(std::size_t index); - - void add_chunk(std::size_t index, td::Slice hash); - void init_proof(); - - td::Ref merge(td::Ref root, size_t index); - void cleanup_add(size_t index); - td::Status do_gen_proof(td::Ref node, size_t il, size_t ir, size_t l, size_t r) const; - void do_gen_proof(td::Ref node, td::Ref node_raw, size_t depth_limit) const; - td::Status validate_existing_chunk(const Chunk &chunk); + td::Ref do_add_pieces(td::Ref node, std::vector &ok_pieces, size_t il, size_t ir, + std::pair *pl, std::pair *pr); }; } // namespace ton diff --git a/storage/MicrochunkTree.cpp b/storage/MicrochunkTree.cpp new file mode 100644 index 00000000..3249d25b --- /dev/null +++ b/storage/MicrochunkTree.cpp @@ -0,0 +1,214 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#include "MicrochunkTree.h" +#include "Torrent.h" +#include "vm/cells/CellSlice.h" +#include "vm/cells/MerkleProof.h" + +namespace ton { + +static td::Ref prun(const td::Ref &node) { + vm::CellBuilder cb; + cb.store_long(static_cast(vm::Cell::SpecialType::PrunnedBranch), 8); + cb.store_long(1, 8); + cb.store_bytes(node->get_hash(0).as_slice()); + cb.store_long(node->get_depth(0), 16); + return cb.finalize(true); +} + +MicrochunkTree::Builder::Builder(td::uint64 file_size, td::uint64 prun_size) + : file_size_(file_size), prun_size_(prun_size) { + total_size_ = MICROCHUNK_SIZE; + while (total_size_ < file_size) { + total_size_ *= 2; + } +} + +void MicrochunkTree::Builder::add_data(td::Slice s) { + CHECK(cur_size_ + s.size() <= file_size_); + while (s.size() > 0) { + size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE; + size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr; + if (buf_remaining > s.size()) { + memcpy(cur_microchunk_ + buf_ptr, s.data(), s.size()); + cur_size_ += s.size(); + return; + } + memcpy(cur_microchunk_ + buf_ptr, s.data(), buf_remaining); + cur_size_ += buf_remaining; + s.remove_prefix(buf_remaining); + add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE)); + } +} + +MicrochunkTree MicrochunkTree::Builder::finalize() { + CHECK(cur_size_ == file_size_); + if (cur_size_ % MICROCHUNK_SIZE != 0) { + size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE; + size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr; + memset(cur_microchunk_ + buf_ptr, 0, buf_remaining); + cur_size_ += buf_remaining; + add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE)); + } + memset(cur_microchunk_, 0, MICROCHUNK_SIZE); + while (cur_size_ < total_size_) { + add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE)); + cur_size_ += MICROCHUNK_SIZE; + } + CHECK(proof_.size() == 1); + MicrochunkTree 
tree(vm::CellBuilder::create_merkle_proof(std::move(proof_[0]))); + CHECK(tree.total_size_ == total_size_); + return tree; +} + +void MicrochunkTree::Builder::add_microchunk(td::Slice s) { + CHECK(s.size() == MICROCHUNK_SIZE); + td::Ref node = vm::CellBuilder().store_zeroes(2).store_bytes(s).finalize_novm(); + while (!proof_.empty() && proof_.back()->get_depth(0) == node->get_depth(0)) { + td::Ref left = std::move(proof_.back()); + proof_.pop_back(); + node = vm::CellBuilder().store_zeroes(2).store_ref(std::move(left)).store_ref(std::move(node)).finalize_novm(); + if ((MICROCHUNK_SIZE << node->get_depth(0)) <= prun_size_) { + node = prun(node); + } + } + proof_.push_back(std::move(node)); +} + +MicrochunkTree::MicrochunkTree(td::Ref root_proof) : root_proof_(root_proof) { + td::Ref virt_root = vm::MerkleProof::virtualize(root_proof_, 1); + CHECK(!virt_root.is_null()); + CHECK(virt_root->get_depth() <= 50); + total_size_ = MICROCHUNK_SIZE << virt_root->get_depth(); + root_hash_ = virt_root->get_hash().bits(); +} + +class GetMicrochunkProof { + public: + GetMicrochunkProof(td::uint64 l, td::uint64 r, Torrent &torrent) : l(l), r(r), torrent(torrent) { + } + + td::Result> unprun(td::uint64 il, td::uint64 ir) { + if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) { + TRY_RESULT(data, get_microchunk(il)); + return vm::CellBuilder().store_zeroes(2).store_bytes(data).finalize_novm(); + } + td::uint64 imid = (il + ir) / 2; + TRY_RESULT(node_l, unprun(il, imid)); + TRY_RESULT(node_r, unprun(imid, ir)); + td::Ref node = + vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm(); + if (l >= ir || il >= r) { + node = prun(node); + } + return node; + } + + td::Result> unprun(const td::Ref &node, td::uint64 il, td::uint64 ir) { + vm::CellSlice cs(vm::NoVm(), node); + if (!cs.is_special()) { + return node; + } + TRY_RESULT(result, unprun(il, ir)); + if (result->get_hash(0) != node->get_hash(0)) { + return td::Status::Error("Hash 
mismatch"); + } + return result; + } + + td::Result> get_proof(td::Ref node, td::uint64 il, td::uint64 ir) { + if (l >= ir || il >= r) { + return prun(node); + } + if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) { + return unprun(node, il, ir); + } + if (l <= il && ir <= r) { + return prun(node); + } + td::uint64 imid = (il + ir) / 2; + TRY_RESULT_ASSIGN(node, unprun(node, il, ir)); + vm::CellSlice cs(vm::NoVm(), node); + if (cs.size_ext() != 2 + (2 << 16)) { + return td::Status::Error("Invalid node in microchunk tree"); + } + TRY_RESULT(node_l, get_proof(cs.prefetch_ref(0), il, imid)); + TRY_RESULT(node_r, get_proof(cs.prefetch_ref(1), imid, ir)); + return vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm(); + } + + private: + td::uint64 l, r; + Torrent &torrent; + + td::uint64 cache_offset = 0; + std::string cache; + + td::Result get_microchunk(td::uint64 l) { + DCHECK(l % MicrochunkTree::MICROCHUNK_SIZE == 0); + td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE; + if (!(cache_offset <= l && r <= cache_offset + cache.size())) { + td::uint64 piece_size = torrent.get_info().piece_size; + td::uint64 piece_i = l / piece_size; + if (piece_i < torrent.get_info().pieces_count()) { + TRY_RESULT(piece, torrent.get_piece_data(piece_i)); + piece.resize(piece_size, '\0'); + cache = std::move(piece); + } else { + cache = std::string(piece_size, '\0'); + } + cache_offset = piece_i * piece_size; + } + return td::Slice{cache.data() + (l - cache_offset), MicrochunkTree::MICROCHUNK_SIZE}; + } +}; + +td::Result> MicrochunkTree::get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const { + if (root_proof_.is_null()) { + return td::Status::Error("Empty microchunk tree"); + } + if (l % MICROCHUNK_SIZE != 0 || r % MICROCHUNK_SIZE != 0 || l >= r || r > total_size_) { + return td::Status::Error("Invalid range"); + } + if (!torrent.inited_info()) { + return td::Status::Error("Torrent info is not ready"); + } + if 
(!torrent.get_info().piece_size % MICROCHUNK_SIZE != 0) { + return td::Status::Error("Invalid piece size in torrent"); + } + td::Ref root_raw = vm::CellSlice(vm::NoVm(), root_proof_).prefetch_ref(); + TRY_RESULT(result, GetMicrochunkProof(l, r, torrent).get_proof(std::move(root_raw), 0, total_size_)); + return vm::CellBuilder::create_merkle_proof(std::move(result)); +} + +td::Result MicrochunkTree::Builder::build_for_torrent(Torrent &torrent, td::uint64 prun_size) { + if (!torrent.inited_info()) { + return td::Status::Error("Torrent info is not available"); + } + const TorrentInfo &info = torrent.get_info(); + Builder builder(info.file_size, prun_size); + td::uint64 pieces_count = info.pieces_count(); + for (td::uint64 i = 0; i < pieces_count; ++i) { + TRY_RESULT(piece, torrent.get_piece_data(i)); + builder.add_data(piece); + } + MicrochunkTree tree = builder.finalize(); + return tree; +} + +} // namespace ton diff --git a/storage/MicrochunkTree.h b/storage/MicrochunkTree.h new file mode 100644 index 00000000..a1661640 --- /dev/null +++ b/storage/MicrochunkTree.h @@ -0,0 +1,74 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#pragma once + +#include "td/utils/optional.h" +#include "td/utils/Slice.h" +#include "vm/cells.h" + +#include "Bitset.h" +#include + +namespace ton { + +class Torrent; + +class MicrochunkTree { + public: + static const size_t MICROCHUNK_SIZE = 64; + + class Builder { + public: + explicit Builder(td::uint64 file_size, td::uint64 prun_size = 1 << 17); + void add_data(td::Slice s); + MicrochunkTree finalize(); + + static td::Result build_for_torrent(Torrent &torrent, td::uint64 prun_size = 1 << 17); + private: + td::uint64 file_size_; + td::uint64 prun_size_; + td::uint64 total_size_; + std::vector> proof_; + unsigned char cur_microchunk_[MICROCHUNK_SIZE]; + td::uint64 cur_size_ = 0; + + void add_microchunk(td::Slice s); + }; + + MicrochunkTree() = default; + MicrochunkTree(td::Ref root_proof); + + td::Result> get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const; + + td::Ref get_root() const { + return root_proof_; + } + td::Bits256 get_root_hash() const { + return root_hash_; + } + td::uint64 get_total_size() const { + return total_size_; + } + + private: + td::Bits256 root_hash_ = td::Bits256::zero(); + td::uint64 total_size_ = 0; + td::Ref root_proof_ = {}; +}; + +} // namespace ton diff --git a/storage/NodeActor.cpp b/storage/NodeActor.cpp index 434a3d62..cfc3ef18 100644 --- a/storage/NodeActor.cpp +++ b/storage/NodeActor.cpp @@ -20,17 +20,38 @@ #include "NodeActor.h" #include "vm/boc.h" +#include "vm/cellslice.h" #include "td/utils/Enumerator.h" #include "td/utils/tests.h" +#include "td/utils/overloaded.h" +#include "tl-utils/common-utils.hpp" +#include "tl-utils/tl-utils.hpp" +#include "auto/tl/ton_api.hpp" +#include "td/actor/MultiPromise.h" namespace ton { -NodeActor::NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, bool should_download) +NodeActor::NodeActor(PeerId self_id, Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download) : self_id_(self_id) , 
torrent_(std::move(torrent)) , callback_(std::move(callback)) + , node_callback_(std::move(node_callback)) + , db_(std::move(db)) + , should_download_(should_download) { +} + +NodeActor::NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download, + DbInitialData db_initial_data) + : self_id_(self_id) + , torrent_(std::move(torrent)) + , callback_(std::move(callback)) + , node_callback_(std::move(node_callback)) + , db_(std::move(db)) , should_download_(should_download) - , parts_helper_(torrent.get_info().pieces_count()) { + , pending_set_file_priority_(std::move(db_initial_data.priorities)) + , pieces_in_db_(std::move(db_initial_data.pieces_in_db)) { } void NodeActor::start_peer(PeerId peer_id, td::Promise> promise) { @@ -45,25 +66,103 @@ void NodeActor::start_peer(PeerId peer_id, td::Promisesecond); } void NodeActor::start_up() { - callback_->register_self(actor_id(this)); + node_callback_->register_self(actor_id(this)); + db_store_torrent(); + if (torrent_.inited_info()) { + init_torrent(); + } + loop(); +} + +void NodeActor::init_torrent() { auto pieces_count = torrent_.get_info().pieces_count(); + parts_helper_.init_parts_count(pieces_count); parts_.parts.resize(pieces_count); auto header = torrent_.get_header_parts_range(); - for (td::uint32 i = static_cast(header.begin); i < header.end; i++) { + for (auto i = static_cast(header.begin); i < header.end; i++) { parts_helper_.set_part_priority(i, 255); } for (td::uint32 i = 0; i < pieces_count; i++) { if (torrent_.is_piece_ready(i)) { - parts_helper_.on_self_part_ready(i); - parts_.parts[i].ready = true; + on_part_ready(i); + } + } + + torrent_info_str_ = + std::make_shared(vm::std_boc_serialize(torrent_.get_info().as_cell()).move_as_ok()); + for (auto &p : peers_) { + auto &state = p.second.state; + state->torrent_info_str_ = torrent_info_str_; + CHECK(!state->torrent_info_ready_.exchange(true)); + } + LOG(INFO) << "Inited 
torrent info for " << torrent_.get_hash().to_hex() << ": size=" << torrent_.get_info().file_size + << ", pieces=" << torrent_.get_info().pieces_count(); + if (torrent_.inited_header()) { + init_torrent_header(); + } +} + +void NodeActor::init_torrent_header() { + if (header_ready_) { + return; + } + header_ready_ = true; + size_t files_count = torrent_.get_files_count().unwrap(); + for (size_t i = 0; i < files_count; ++i) { + file_name_to_idx_[torrent_.get_file_name(i).str()] = i; + } + db_store_priorities_paused_ = true; + file_priority_.resize(files_count, 1); + for (auto &s : pending_set_file_priority_) { + td::Promise P = [](td::Result) {}; + s.file.visit( + td::overloaded([&](const PendingSetFilePriority::All &) { set_all_files_priority(s.priority, std::move(P)); }, + [&](const size_t &i) { set_file_priority_by_idx(i, s.priority, std::move(P)); }, + [&](const std::string &name) { set_file_priority_by_name(name, s.priority, std::move(P)); })); + } + pending_set_file_priority_.clear(); + torrent_.enable_write_to_files(); + db_store_priorities_paused_ = false; + db_store_priorities(); + + auto pieces = pieces_in_db_; + for (td::uint64 p : pieces) { + if (!torrent_.is_piece_in_memory(p)) { + db_erase_piece(p); + } + } + for (td::uint64 p : torrent_.get_pieces_in_memory()) { + if (!pieces_in_db_.count(p)) { + db_store_piece(p, torrent_.get_piece_data(p).move_as_ok()); + } + } + db_update_pieces_list(); + recheck_parts(Torrent::PartsRange{0, torrent_.get_info().pieces_count()}); + db_store_torrent_meta(); + + LOG(INFO) << "Inited torrent header for " << torrent_.get_hash().to_hex() + << ": files=" << torrent_.get_files_count().value() << ", included_size=" << torrent_.get_included_size(); +} + +void NodeActor::recheck_parts(Torrent::PartsRange range) { + CHECK(torrent_.inited_info()); + for (size_t i = range.begin; i < range.end; ++i) { + if (parts_.parts[i].ready && !torrent_.is_piece_ready(i)) { + parts_helper_.on_self_part_not_ready(i); + parts_.parts[i].ready = 
false; + } else if (!parts_.parts[i].ready && torrent_.is_piece_ready(i)) { + on_part_ready((PartId)i); } } - loop(); } void NodeActor::loop_will_upload() { @@ -80,12 +179,12 @@ void NodeActor::loop_will_upload() { alarm_timestamp().relax(will_upload_at_); std::vector> peers; for (auto &it : peers_) { - auto state = it.second.state.lock(); + auto &state = it.second.state; bool needed = false; - if (state->peer_state_) { - needed = state->peer_state_.value().want_download; + if (state->peer_state_ready_) { + needed = state->peer_state_.load().want_download; } - peers.emplace_back(!needed, !state->node_state_.want_download, -state->download.speed(), it.first); + peers.emplace_back(!needed, !state->node_state_.load().want_download, -it.second.download_speed.speed(), it.first); } std::sort(peers.begin(), peers.end()); @@ -101,9 +200,11 @@ void NodeActor::loop_will_upload() { for (auto &it : peers_) { auto will_upload = peers_set.count(it.first) > 0; - auto state = it.second.state.lock(); - if (state->node_state_.will_upload != will_upload) { - state->node_state_.will_upload = will_upload; + auto &state = it.second.state; + auto node_state = state->node_state_.load(); + if (node_state.will_upload != will_upload) { + node_state.will_upload = will_upload; + state->node_state_.exchange(node_state); state->notify_peer(); } } @@ -112,43 +213,64 @@ void NodeActor::loop_will_upload() { void NodeActor::loop() { loop_get_peers(); loop_start_stop_peers(); - loop_queries(); - loop_will_upload(); + if (torrent_.inited_info()) { + loop_queries(); + loop_will_upload(); + } if (!ready_parts_.empty()) { for (auto &it : peers_) { - auto state = it.second.state.lock(); - state->node_ready_parts_.insert(state->node_ready_parts_.end(), ready_parts_.begin(), ready_parts_.end()); + auto &state = it.second.state; + state->node_ready_parts_.add_elements(ready_parts_); state->notify_peer(); } ready_parts_.clear(); } - if (torrent_.is_completed() && !is_completed_) { - is_completed_ = true; - 
callback_->on_completed(); + if (next_db_store_meta_at_ && next_db_store_meta_at_.is_in_past()) { + db_store_torrent_meta(); + } + + if (torrent_.get_fatal_error().is_error()) { + for (auto &promise : wait_for_completion_) { + promise.set_error(torrent_.get_fatal_error().clone()); + } + wait_for_completion_.clear(); + } else if (torrent_.is_completed()) { + db_store_torrent_meta(); + if (!is_completed_) { + for (auto &promise : wait_for_completion_) { + promise.set_result(td::Unit()); + } + wait_for_completion_.clear(); + is_completed_ = true; + callback_->on_completed(); + } } } std::string NodeActor::get_stats_str() { td::StringBuilder sb; - sb << "Node " << self_id_ << " " << torrent_.get_ready_parts_count() << "\t" << download_; + sb << "Node " << self_id_ << " " << torrent_.get_ready_parts_count() << "\t" << download_speed_; sb << "\toutq " << parts_.total_queries; sb << "\n"; for (auto &it : peers_) { - auto state = it.second.state.lock(); + auto &state = it.second.state; sb << "\tPeer " << it.first; - sb << "\t" << parts_helper_.get_ready_parts(it.second.peer_token).ones_count(); - sb << "\t" << state->download; - if (state->peer_state_) { - auto &peer_state = state->peer_state_.value(); + if (torrent_.inited_info()) { + sb << "\t" << parts_helper_.get_ready_parts(it.second.peer_token).ones_count(); + } + sb << "\t" << it.second.download_speed; + if (state->peer_state_ready_) { + auto peer_state = state->peer_state_.load(); sb << "\t up:" << peer_state.will_upload; sb << "\tdown:" << peer_state.want_download; - sb << "\tcnt:" << parts_helper_.get_want_download_count(it.second.peer_token); + if (torrent_.inited_info()) { + sb << "\tcnt:" << parts_helper_.get_want_download_count(it.second.peer_token); + } } - sb << "\toutq:" << state->node_queries_.size(); - sb << "\tinq:" << state->peer_queries_.size(); - auto &node_state = state->node_state_; + sb << "\toutq:" << state->node_queries_active_.size(); + auto node_state = state->node_state_.load(); sb << 
"\tNup:" << node_state.will_upload; sb << "\tNdown:" << node_state.want_download; sb << "\n"; @@ -170,35 +292,57 @@ std::string NodeActor::get_stats_str() { return sb.as_cslice().str(); } -void NodeActor::set_file_priority(size_t i, td::uint8 priority) { - auto o_files_count = torrent_.get_files_count(); - if (!o_files_count) { +void NodeActor::set_all_files_priority(td::uint8 priority, td::Promise promise) { + if (!header_ready_) { + pending_set_file_priority_.clear(); + pending_set_file_priority_.push_back(PendingSetFilePriority{PendingSetFilePriority::All(), priority}); + db_store_priorities(); + promise.set_result(false); return; } - auto files_count = o_files_count.unwrap(); - if (file_priority_.size() != files_count) { - // by default all parts priority == 1 - file_priority_.resize(files_count, 1); + auto header_range = torrent_.get_header_parts_range(); + for (td::uint32 i = 0; i < torrent_.get_info().pieces_count(); i++) { + if (!header_range.contains(i)) { + parts_helper_.set_part_priority(i, priority); + } } + for (size_t i = 0; i < file_priority_.size(); ++i) { + file_priority_[i] = priority; + torrent_.set_file_excluded(i, priority == 0); + } + recheck_parts(Torrent::PartsRange{0, torrent_.get_info().pieces_count()}); + db_store_priorities(); + update_pieces_in_db(0, torrent_.get_info().pieces_count()); + if (!torrent_.is_completed()) { + is_completed_ = false; + } + promise.set_result(true); + yield(); +} +void NodeActor::set_file_priority_by_idx(size_t i, td::uint8 priority, td::Promise promise) { + if (!header_ready_) { + pending_set_file_priority_.push_back(PendingSetFilePriority{i, priority}); + db_store_priorities(); + promise.set_result(false); + return; + } + auto files_count = torrent_.get_files_count().unwrap(); if (i >= files_count) { - for (td::uint32 part_i = 0; part_i < torrent_.get_info().pieces_count(); part_i++) { - parts_helper_.set_part_priority(part_i, priority); - } - for (auto &p : file_priority_) { - p = priority; - } + 
promise.set_error(td::Status::Error("File index is too big")); return; } if (file_priority_[i] == priority) { + promise.set_result(true); return; } file_priority_[i] = priority; + torrent_.set_file_excluded(i, priority == 0); auto range = torrent_.get_file_parts_range(i); - td::uint32 begin = static_cast(range.begin); - td::uint32 end = static_cast(range.end); - for (td::uint32 i = begin; i < end; i++) { - if (i == begin || i + 1 == end) { + recheck_parts(range); + update_pieces_in_db(range.begin, range.end); + for (auto i = range.begin; i < range.end; i++) { + if (i == range.begin || i + 1 == range.end) { auto chunks = torrent_.chunks_by_piece(i); td::uint8 max_priority = 0; for (auto chunk_id : chunks) { @@ -213,15 +357,95 @@ void NodeActor::set_file_priority(size_t i, td::uint8 priority) { parts_helper_.set_part_priority(i, priority); } } + db_store_priorities(); + if (!torrent_.is_completed()) { + is_completed_ = false; + } + promise.set_result(true); yield(); } +void NodeActor::set_file_priority_by_name(std::string name, td::uint8 priority, td::Promise promise) { + if (!header_ready_) { + pending_set_file_priority_.push_back(PendingSetFilePriority{name, priority}); + db_store_priorities(); + promise.set_result(false); + return; + } + auto it = file_name_to_idx_.find(name); + if (it == file_name_to_idx_.end()) { + promise.set_error(td::Status::Error("No such file")); + return; + } + set_file_priority_by_idx(it->second, priority, std::move(promise)); +} + +void NodeActor::wait_for_completion(td::Promise promise) { + if (torrent_.get_fatal_error().is_error()) { + promise.set_error(torrent_.get_fatal_error().clone()); + } else if (is_completed_) { + promise.set_result(td::Unit()); + } else { + wait_for_completion_.push_back(std::move(promise)); + } +} + void NodeActor::set_should_download(bool should_download) { + if (should_download == should_download_) { + return; + } should_download_ = should_download; + db_store_torrent(); yield(); } +void 
NodeActor::load_from(td::optional meta, std::string files_path, td::Promise promise) { + auto S = [&]() -> td::Status { + if (meta) { + TorrentInfo &info = meta.value().info; + if (info.get_hash() != torrent_.get_hash()) { + return td::Status::Error("Incorrect hash in meta"); + } + if (!torrent_.inited_info()) { + LOG(INFO) << "Loading torrent info for " << torrent_.get_hash().to_hex(); + TRY_STATUS(torrent_.init_info(std::move(info))); + init_torrent(); + } + auto &header = meta.value().header; + if (header && !torrent_.inited_header()) { + LOG(INFO) << "Loading torrent header for " << torrent_.get_hash().to_hex(); + TRY_STATUS(torrent_.set_header(header.unwrap())); + init_torrent_header(); + } + auto proof = std::move(meta.value().root_proof); + if (!proof.is_null()) { + LOG(INFO) << "Loading proof for " << torrent_.get_hash().to_hex(); + TRY_STATUS(torrent_.add_proof(std::move(proof))); + } + } + TRY_STATUS_PREFIX(torrent_.get_fatal_error().clone(), "Fatal error: "); + if (torrent_.inited_header() && !files_path.empty()) { + torrent_.load_from_files(std::move(files_path)); + } + TRY_STATUS_PREFIX(torrent_.get_fatal_error().clone(), "Fatal error: "); + return td::Status::OK(); + }(); + if (S.is_error()) { + LOG(WARNING) << "Load from failed: " << S; + promise.set_error(std::move(S)); + } else { + promise.set_result(td::Unit()); + } + if (torrent_.inited_header()) { + recheck_parts(Torrent::PartsRange{0, torrent_.get_info().pieces_count()}); + } + loop(); +} + void NodeActor::tear_down() { + for (auto &promise : wait_for_completion_) { + promise.set_error(td::Status::Error("Torrent closed")); + } callback_->on_closed(std::move(torrent_)); } @@ -235,16 +459,24 @@ void NodeActor::loop_start_stop_peers() { } if (peer.actor.empty()) { - LOG(ERROR) << "Init Peer " << self_id_ << " -> " << peer_id; - auto state = peer.state.lock(); - state->node = peer.notifier.get(); - for (td::uint32 i = 0; i < parts_.parts.size(); i++) { - if (parts_.parts[i].ready) { - 
state->node_ready_parts_.push_back(i); + auto &state = peer.state = std::make_shared(peer.notifier.get()); + if (torrent_.inited_info()) { + std::vector node_ready_parts; + for (td::uint32 i = 0; i < parts_.parts.size(); i++) { + if (parts_.parts[i].ready) { + node_ready_parts.push_back(i); + } } + state->node_ready_parts_.add_elements(std::move(node_ready_parts)); + state->torrent_info_str_ = torrent_info_str_; + state->torrent_info_ready_ = true; + } else { + state->torrent_info_response_callback_ = [SelfId = actor_id(this)](td::BufferSlice data) { + td::actor::send_closure(SelfId, &NodeActor::got_torrent_info_str, std::move(data)); + }; } peer.peer_token = parts_helper_.register_peer(peer_id); - peer.actor = callback_->create_peer(self_id_, peer_id, peer.state); + peer.actor = node_callback_->create_peer(self_id_, peer_id, peer.state); } } } @@ -255,29 +487,31 @@ void NodeActor::loop_queries() { } for (auto &it : peers_) { auto peer_token = it.second.peer_token; - auto state = it.second.state.lock(); - if (!state->peer_state_) { + auto &state = it.second.state; + if (!state->peer_state_ready_) { parts_helper_.set_peer_limit(peer_token, 0); continue; } - if (!state->peer_state_.value().will_upload) { + if (!state->peer_state_.load().will_upload) { parts_helper_.set_peer_limit(peer_token, 0); continue; } - parts_helper_.set_peer_limit(peer_token, - td::narrow_cast(MAX_PEER_TOTAL_QUERIES - state->node_queries_.size())); + parts_helper_.set_peer_limit( + peer_token, td::narrow_cast(MAX_PEER_TOTAL_QUERIES - state->node_queries_active_.size())); } auto parts = parts_helper_.get_rarest_parts(MAX_TOTAL_QUERIES); for (auto &part : parts) { auto it = peers_.find(part.peer_id); CHECK(it != peers_.end()); - auto state = it->second.state.lock(); - CHECK(state->peer_state_); - CHECK(state->peer_state_.value().will_upload); - CHECK(state->node_queries_.size() < MAX_PEER_TOTAL_QUERIES); + auto &state = it->second.state; + CHECK(state->peer_state_ready_); + 
CHECK(state->peer_state_.load().will_upload); + CHECK(state->node_queries_active_.size() < MAX_PEER_TOTAL_QUERIES); auto part_id = part.part_id; - state->node_queries_[static_cast(part_id)]; + if (state->node_queries_active_.insert(static_cast(part_id)).second) { + state->node_queries_.add_element(static_cast(part_id)); + } parts_helper_.lock_part(part_id); parts_.total_queries++; parts_.parts[part_id].query_to_peer = part.peer_id; @@ -290,7 +524,7 @@ void NodeActor::loop_get_peers() { return; } if (next_get_peers_at_.is_in_past()) { - callback_->get_peers(promise_send_closure(td::actor::actor_id(this), &NodeActor::got_peers)); + node_callback_->get_peers(self_id_, promise_send_closure(td::actor::actor_id(this), &NodeActor::got_peers)); has_get_peers_ = true; return; } @@ -315,67 +549,75 @@ void NodeActor::got_peers(td::Result> r_peers) { } void NodeActor::loop_peer(const PeerId &peer_id, Peer &peer) { - auto state = peer.state.lock(); - CHECK(!state->peer.empty()); + auto &state = peer.state; + if (!state->peer_ready_ || !torrent_.inited_info()) { + return; + } - for (auto part_id : state->peer_ready_parts_) { + for (auto part_id : state->peer_ready_parts_.read()) { parts_helper_.on_peer_part_ready(peer.peer_token, part_id); } - state->peer_ready_parts_.clear(); // Answer queries from peer bool should_notify_peer = false; auto want_download = parts_helper_.get_want_download_count(peer.peer_token) > 0; - if (state->node_state_.want_download != want_download) { - state->node_state_.want_download = want_download; + auto node_state = state->node_state_.load(); + if (node_state.want_download != want_download) { + node_state.want_download = want_download; + state->node_state_.exchange(node_state); should_notify_peer = true; } - for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) { - if (it->second) { - it++; - } else { - should_notify_peer = true; - it->second = [&]() -> td::Result { - if (!state->node_state_.will_upload) { - return 
td::Status::Error("Won't upload"); - } - TRY_RESULT(proof, torrent_.get_piece_proof(it->first)); - TRY_RESULT(data, torrent_.get_piece_data(it->first)); - PeerState::Part res; - TRY_RESULT(proof_serialized, vm::std_boc_serialize(std::move(proof))); - res.proof = std::move(proof_serialized); - res.data = td::BufferSlice(std::move(data)); - return std::move(res); - }(); + std::vector>> results; + for (td::uint32 part_id : state->peer_queries_.read()) { + should_notify_peer = true; + auto res = [&]() -> td::Result { + if (!node_state.will_upload) { + return td::Status::Error("Won't upload"); + } + TRY_RESULT(proof, torrent_.get_piece_proof(part_id)); + TRY_RESULT(data, torrent_.get_piece_data(part_id)); + PeerState::Part res; + TRY_RESULT(proof_serialized, vm::std_boc_serialize(std::move(proof))); + res.proof = std::move(proof_serialized); + res.data = td::BufferSlice(std::move(data)); + td::uint64 size = res.data.size() + res.proof.size(); + upload_speed_.add(size); + peer.upload_speed.add(size); + return std::move(res); + }(); + results.emplace_back(part_id, std::move(res)); + } + state->peer_queries_results_.add_elements(std::move(results)); + + // Handle results from peer + for (auto &p : state->node_queries_results_.read()) { + auto part_id = p.first; + if (!state->node_queries_active_.count(part_id)) { + continue; + } + auto r_unit = p.second.move_fmap([&](PeerState::Part part) -> td::Result { + TRY_RESULT(proof, vm::std_boc_deserialize(part.proof)); + TRY_STATUS(torrent_.add_piece(part_id, part.data.as_slice(), std::move(proof))); + update_pieces_in_db(part_id, part_id + 1); + download_speed_.add(part.data.size()); + peer.download_speed.add(part.data.size()); + return td::Unit(); + }); + + parts_.parts[part_id].query_to_peer = {}; + parts_.total_queries--; + state->node_queries_active_.erase(part_id); + parts_helper_.unlock_part(part_id); + + if (r_unit.is_ok()) { + on_part_ready(part_id); } } - // Handle results from peer - for (auto it = 
state->node_queries_.begin(); it != state->node_queries_.end();) { - if (it->second) { - auto part_id = it->first; - auto r_unit = it->second.unwrap().move_fmap([&](PeerState::Part part) -> td::Result { - TRY_RESULT(proof, vm::std_boc_deserialize(part.proof)); - TRY_STATUS(torrent_.add_piece(part_id, part.data.as_slice(), std::move(proof))); - download_.add(part.data.size(), td::Timestamp::now()); - return td::Unit(); - }); - - parts_.parts[part_id].query_to_peer = {}; - parts_.total_queries--; - it = state->node_queries_.erase(it); - parts_helper_.unlock_part(part_id); - - if (r_unit.is_ok()) { - on_part_ready(part_id); - } else { - //LOG(ERROR) << "Failed " << part_id; - } - } else { - it++; - } + if (!header_ready_ && torrent_.inited_info() && torrent_.inited_header()) { + init_torrent_header(); } if (should_notify_peer) { @@ -391,8 +633,421 @@ void NodeActor::on_part_ready(PartId part_id) { parts_.parts[part_id].ready = true; for (auto &peer : peers_) { // TODO: notify only peer want_download_count == 0 - peer.second.state.unsafe()->notify_node(); + peer.second.state->notify_peer(); } ready_parts_.push_back(part_id); } + +void NodeActor::got_torrent_info_str(td::BufferSlice data) { + if (torrent_.inited_info()) { + return; + } + auto r_info_cell = vm::std_boc_deserialize(data.as_slice()); + if (r_info_cell.is_error()) { + return; + } + TorrentInfo info; + vm::CellSlice cs = vm::load_cell_slice(r_info_cell.move_as_ok()); + if (!info.unpack(cs)) { + return; + } + info.init_cell(); + if (torrent_.init_info(std::move(info)).is_error()) { + return; + } + init_torrent(); + loop(); +} + +void NodeActor::update_pieces_in_db(td::uint64 begin, td::uint64 end) { + bool changed = false; + for (auto i = begin; i < end; ++i) { + bool stored = pieces_in_db_.count(i); + bool need_store = torrent_.is_piece_in_memory(i); + if (need_store == stored) { + continue; + } + changed = true; + if (need_store) { + db_store_piece(i, torrent_.get_piece_data(i).move_as_ok()); + } else { + 
db_erase_piece(i); + } + } + if (changed) { + db_update_pieces_list(); + } +} + +void NodeActor::db_store_torrent() { + if (!db_) { + return; + } + auto obj = create_tl_object(); + obj->active_download_ = should_download_; + obj->root_dir_ = torrent_.get_root_dir(); + db_->set(create_hash_tl_object(torrent_.get_hash()), serialize_tl_object(obj, true), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_store_priorities() { + if (!db_ || db_store_priorities_paused_) { + return; + } + auto obj = create_tl_object(); + if (file_priority_.empty()) { + for (auto &s : pending_set_file_priority_) { + s.file.visit(td::overloaded( + [&](const PendingSetFilePriority::All &) { + obj->actions_.push_back(create_tl_object(s.priority)); + }, + [&](const size_t &i) { + obj->actions_.push_back(create_tl_object(i, s.priority)); + }, + [&](const std::string &name) { + obj->actions_.push_back(create_tl_object(name, s.priority)); + })); + } + } else { + size_t prior_cnt[256]; + std::fill(prior_cnt, prior_cnt + 256, 0); + for (td::uint8 p : file_priority_) { + ++prior_cnt[p]; + } + auto base_priority = (td::uint8)(std::max_element(prior_cnt, prior_cnt + 256) - prior_cnt); + obj->actions_.push_back(create_tl_object(base_priority)); + for (size_t i = 0; i < file_priority_.size(); ++i) { + if (file_priority_[i] != base_priority) { + obj->actions_.push_back(create_tl_object(i, file_priority_[i])); + } + } + } + db_->set(create_hash_tl_object(torrent_.get_hash()), + serialize_tl_object(obj, true), [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent priorities to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_store_torrent_meta() { + if (!db_ || !torrent_.inited_info() || (td::int64)torrent_.get_ready_parts_count() == last_stored_meta_count_) { + after_db_store_torrent_meta(last_stored_meta_count_); + return; + } + next_db_store_meta_at_ = 
td::Timestamp::never(); + auto meta = torrent_.get_meta_str(); + db_->set(create_hash_tl_object(torrent_.get_hash()), td::BufferSlice(meta), + [new_count = (td::int64)torrent_.get_ready_parts_count(), SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &NodeActor::after_db_store_torrent_meta, R.move_as_error()); + } else { + td::actor::send_closure(SelfId, &NodeActor::after_db_store_torrent_meta, new_count); + } + }); +} + +void NodeActor::after_db_store_torrent_meta(td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent meta to db: " << R.move_as_error(); + } else { + last_stored_meta_count_ = R.move_as_ok(); + } + next_db_store_meta_at_ = td::Timestamp::in(td::Random::fast(10.0, 20.0)); + alarm_timestamp().relax(next_db_store_meta_at_); +} + +void NodeActor::db_store_piece(td::uint64 i, std::string s) { + pieces_in_db_.insert(i); + if (!db_) { + return; + } + db_->set(create_hash_tl_object(torrent_.get_hash(), i), td::BufferSlice(s), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to store piece to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_erase_piece(td::uint64 i) { + pieces_in_db_.erase(i); + if (!db_) { + return; + } + db_->erase(create_hash_tl_object(torrent_.get_hash(), i), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to store piece to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::db_update_pieces_list() { + if (!db_) { + return; + } + auto obj = create_tl_object(); + for (td::uint64 p : pieces_in_db_) { + obj->pieces_.push_back(p); + } + db_->set(create_hash_tl_object(torrent_.get_hash()), + serialize_tl_object(obj, true), [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to store list of pieces to db: " << R.move_as_error(); + } + }); +} + +void NodeActor::load_from_db(std::shared_ptr db, td::Bits256 hash, td::unique_ptr callback, + td::unique_ptr node_callback, + td::Promise> promise) { + class 
Loader : public td::actor::Actor { + public: + Loader(std::shared_ptr db, td::Bits256 hash, td::unique_ptr callback, + td::unique_ptr node_callback, td::Promise> promise) + : db_(std::move(db)) + , hash_(hash) + , callback_(std::move(callback)) + , node_callback_(std::move(node_callback)) + , promise_(std::move(promise)) { + } + + void finish(td::Result> R) { + promise_.set_result(std::move(R)); + stop(); + } + + void start_up() override { + db::db_get( + *db_, create_hash_tl_object(hash_), false, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Torrent: ")); + } else { + td::actor::send_closure(SelfId, &Loader::got_torrent, R.move_as_ok()); + } + }); + } + + void got_torrent(tl_object_ptr obj) { + root_dir_ = std::move(obj->root_dir_); + active_download_ = obj->active_download_; + db_->get(create_hash_tl_object(hash_), + [SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Meta: ")); + return; + } + auto r = R.move_as_ok(); + if (r.status == td::KeyValueReader::GetStatus::NotFound) { + td::actor::send_closure(SelfId, &Loader::got_meta_str, td::optional()); + } else { + td::actor::send_closure(SelfId, &Loader::got_meta_str, std::move(r.value)); + } + }); + } + + void got_meta_str(td::optional meta_str) { + auto r_torrent = [&]() -> td::Result { + Torrent::Options options; + options.root_dir = std::move(root_dir_); + options.in_memory = false; + options.validate = false; + if (meta_str) { + TRY_RESULT(meta, TorrentMeta::deserialize(meta_str.value().as_slice())); + options.validate = true; + return Torrent::open(std::move(options), std::move(meta)); + } else { + return Torrent::open(std::move(options), hash_); + } + }(); + if (r_torrent.is_error()) { + finish(r_torrent.move_as_error()); + return; + } + torrent_ = r_torrent.move_as_ok(); + + db::db_get( + *db_, create_hash_tl_object(hash_), 
true, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Priorities: ")); + } else { + td::actor::send_closure(SelfId, &Loader::got_priorities, R.move_as_ok()); + } + }); + } + + void got_priorities(tl_object_ptr priorities) { + if (priorities != nullptr) { + for (auto &p : priorities->actions_) { + td::Variant file; + int priority = 0; + ton_api::downcast_call(*p, td::overloaded( + [&](ton_api::storage_priorityAction_all &obj) { + file = PendingSetFilePriority::All(); + priority = obj.priority_; + }, + [&](ton_api::storage_priorityAction_idx &obj) { + file = (size_t)obj.idx_; + priority = obj.priority_; + }, + [&](ton_api::storage_priorityAction_name &obj) { + file = std::move(obj.name_); + priority = obj.priority_; + })); + auto R = td::narrow_cast_safe(priority); + if (R.is_error()) { + LOG(ERROR) << "Invalid priority in db: " << R.move_as_error(); + continue; + } + priorities_.push_back(PendingSetFilePriority{std::move(file), R.move_as_ok()}); + } + } + + db::db_get( + *db_, create_hash_tl_object(hash_), true, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Pieces in db: ")); + } else { + td::actor::send_closure(SelfId, &Loader::got_pieces_in_db, R.move_as_ok()); + } + }); + } + + void got_pieces_in_db(tl_object_ptr list) { + for (auto idx : list == nullptr ? 
std::vector() : list->pieces_) { + ++remaining_pieces_in_db_; + db_->get(create_hash_tl_object(hash_, idx), + [SelfId = actor_id(this), idx](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &Loader::finish, R.move_as_error_prefix("Piece in db: ")); + return; + } + auto r = R.move_as_ok(); + td::optional piece; + if (r.status == td::KeyValueReader::GetStatus::Ok) { + piece = std::move(r.value); + } + td::actor::send_closure(SelfId, &Loader::got_piece_in_db, idx, std::move(piece)); + }); + } + if (remaining_pieces_in_db_ == 0) { + finished_db_read(); + } + } + + void got_piece_in_db(size_t idx, td::optional data) { + if (data) { + auto r_proof = torrent_.value().get_piece_proof(idx); + if (r_proof.is_ok()) { + torrent_.value().add_piece(idx, data.unwrap(), r_proof.move_as_ok()); + } + pieces_in_db_.insert(idx); + } + if (--remaining_pieces_in_db_ == 0) { + finished_db_read(); + } + } + + void finished_db_read() { + DbInitialData data; + data.priorities = std::move(priorities_); + data.pieces_in_db = std::move(pieces_in_db_); + finish(td::actor::create_actor("Node", 1, torrent_.unwrap(), std::move(callback_), + std::move(node_callback_), std::move(db_), active_download_, + std::move(data))); + } + + private: + std::shared_ptr db_; + td::Bits256 hash_; + td::unique_ptr callback_; + td::unique_ptr node_callback_; + td::Promise> promise_; + + std::string root_dir_; + bool active_download_{false}; + td::optional torrent_; + std::vector priorities_; + std::set pieces_in_db_; + size_t remaining_pieces_in_db_ = 0; + }; + td::actor::create_actor("loader", std::move(db), hash, std::move(callback), std::move(node_callback), + std::move(promise)) + .release(); +} + +void NodeActor::cleanup_db(std::shared_ptr db, td::Bits256 hash, td::Promise promise) { + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise(std::move(promise)); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + db->erase(create_hash_tl_object(hash), 
ig.get_promise()); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + db::db_get( + *db, create_hash_tl_object(hash), true, + [db, promise = ig.get_promise(), hash](td::Result> R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + auto pieces = R.move_as_ok(); + if (pieces == nullptr) { + promise.set_result(td::Unit()); + return; + } + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise(std::move(promise)); + db->erase(create_hash_tl_object(hash), ig.get_promise()); + for (auto idx : pieces->pieces_) { + db->erase(create_hash_tl_object(hash, idx), ig.get_promise()); + } + }); +} + +void NodeActor::get_peers_info(td::Promise> promise) { + auto result = std::make_shared>>(); + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise([result, promise = std::move(promise), download_speed = download_speed_.speed(), + upload_speed = upload_speed_.speed(), parts = parts_.parts.size()](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + promise.set_result( + create_tl_object(std::move(*result), download_speed, upload_speed, parts)); + }); + + result->reserve(peers_.size()); + size_t i = 0; + for (auto &peer : peers_) { + if (!peer.second.state->peer_online_) { + continue; + } + result->push_back(create_tl_object()); + auto &obj = *result->back(); + obj.download_speed_ = peer.second.download_speed.speed(); + obj.upload_speed_ = peer.second.upload_speed.speed(); + obj.ready_parts_ = parts_helper_.get_ready_parts(peer.second.peer_token).ones_count(); + node_callback_->get_peer_info( + self_id_, peer.first, + [result, i, promise = ig.get_promise()](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, r, std::move(R)); + result->at(i)->adnl_id_ = r.first; + result->at(i)->ip_str_ = r.second; + promise.set_result(td::Unit()); + }); + ++i; + } +} + } // namespace ton diff --git a/storage/NodeActor.h b/storage/NodeActor.h index 79c78b7b..f7fe1c39 100644 --- 
a/storage/NodeActor.h +++ b/storage/NodeActor.h @@ -25,42 +25,87 @@ #include "Torrent.h" #include "td/utils/Random.h" +#include "td/utils/Variant.h" #include +#include "db.h" namespace ton { class NodeActor : public td::actor::Actor { public: + class NodeCallback { + public: + virtual ~NodeCallback() = default; + virtual td::actor::ActorOwn create_peer(PeerId self_id, PeerId peer_id, + std::shared_ptr state) = 0; + virtual void get_peers(PeerId src, td::Promise> peers) = 0; + virtual void register_self(td::actor::ActorId self) = 0; + virtual void get_peer_info(PeerId src, PeerId peer, td::Promise> promise) { + promise.set_error(td::Status::Error("Not implemented")); + } + }; + class Callback { public: - virtual ~Callback() { - } - virtual td::actor::ActorOwn create_peer(PeerId self_id, PeerId peer_id, - td::SharedState state) = 0; - virtual void get_peers(td::Promise> peers) = 0; - virtual void register_self(td::actor::ActorId self) = 0; - - //TODO: proper callbacks + virtual ~Callback() = default; virtual void on_completed() = 0; virtual void on_closed(ton::Torrent torrent) = 0; }; - NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, bool should_download = true); + struct PendingSetFilePriority { + struct All {}; + td::Variant file; + td::uint8 priority; + }; + struct DbInitialData { + std::vector priorities; + std::set pieces_in_db; + }; + + NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download = true); + NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr callback, + td::unique_ptr node_callback, std::shared_ptr db, bool should_download, + DbInitialData db_initial_data); void start_peer(PeerId peer_id, td::Promise> promise); - ton::Torrent *with_torrent() { - return &torrent_; + struct NodeState { + Torrent &torrent; + bool active_download; + double download_speed; + double upload_speed; + const std::vector &file_priority; + }; + void 
with_torrent(td::Promise promise) { + promise.set_value( + NodeState{torrent_, should_download_, download_speed_.speed(), upload_speed_.speed(), file_priority_}); } std::string get_stats_str(); - void set_file_priority(size_t i, td::uint8 priority); void set_should_download(bool should_download); + void set_all_files_priority(td::uint8 priority, td::Promise promise); + void set_file_priority_by_idx(size_t i, td::uint8 priority, td::Promise promise); + void set_file_priority_by_name(std::string name, td::uint8 priority, td::Promise promise); + + void load_from(td::optional meta, std::string files_path, td::Promise promise); + + void wait_for_completion(td::Promise promise); + void get_peers_info(td::Promise> promise); + + static void load_from_db(std::shared_ptr db, td::Bits256 hash, td::unique_ptr callback, + td::unique_ptr node_callback, + td::Promise> promise); + static void cleanup_db(std::shared_ptr db, td::Bits256 hash, td::Promise promise); + private: PeerId self_id_; ton::Torrent torrent_; + std::shared_ptr torrent_info_str_; std::vector file_priority_; td::unique_ptr callback_; + td::unique_ptr node_callback_; + std::shared_ptr db_; bool should_download_{false}; class Notifier : public td::actor::Actor { @@ -80,24 +125,13 @@ class NodeActor : public td::actor::Actor { struct Peer { td::actor::ActorOwn actor; td::actor::ActorOwn notifier; - td::SharedState state; + std::shared_ptr state; PartsHelper::PeerToken peer_token; + LoadSpeed download_speed, upload_speed; }; std::map peers_; - struct QueryId { - PeerId peer; - PartId part; - - auto key() const { - return std::tie(peer, part); - } - bool operator<(const QueryId &other) const { - return key() < other.key(); - } - }; - struct PartsSet { struct Info { td::optional query_to_peer; @@ -110,7 +144,7 @@ class NodeActor : public td::actor::Actor { PartsSet parts_; PartsHelper parts_helper_; std::vector ready_parts_; - LoadSpeed download_; + LoadSpeed download_speed_, upload_speed_; td::Timestamp 
next_get_peers_at_; bool has_get_peers_{false}; @@ -118,9 +152,22 @@ class NodeActor : public td::actor::Actor { static constexpr double GET_PEER_EACH = 5; bool is_completed_{false}; + std::vector> wait_for_completion_; td::Timestamp will_upload_at_; + std::vector pending_set_file_priority_; + bool header_ready_ = false; + std::map file_name_to_idx_; + std::set pieces_in_db_; + bool db_store_priorities_paused_ = false; + td::int64 last_stored_meta_count_ = -1; + td::Timestamp next_db_store_meta_at_ = td::Timestamp::now(); + + void init_torrent(); + void init_torrent_header(); + void recheck_parts(Torrent::PartsRange range); + void on_signal_from_peer(PeerId peer_id); void start_up() override; @@ -134,13 +181,23 @@ class NodeActor : public td::actor::Actor { static constexpr size_t MAX_TOTAL_QUERIES = 20; static constexpr size_t MAX_PEER_TOTAL_QUERIES = 5; void loop_queries(); - bool try_send_query(); - bool try_send_part(PartId part_id); void loop_get_peers(); void got_peers(td::Result> r_peers); void loop_peer(const PeerId &peer_id, Peer &peer); void on_part_ready(PartId part_id); void loop_will_upload(); + + void got_torrent_info_str(td::BufferSlice data); + + void update_pieces_in_db(td::uint64 begin, td::uint64 end); + + void db_store_torrent(); + void db_store_priorities(); + void db_store_torrent_meta(); + void after_db_store_torrent_meta(td::Result R); + void db_store_piece(td::uint64 i, std::string s); + void db_erase_piece(td::uint64 i); + void db_update_pieces_list(); }; } // namespace ton diff --git a/storage/PartsHelper.h b/storage/PartsHelper.h index 1fe0558d..6ad4e0b3 100644 --- a/storage/PartsHelper.h +++ b/storage/PartsHelper.h @@ -26,12 +26,16 @@ namespace ton { struct PartsHelper { public: - PartsHelper(size_t parts_count) : parts_(parts_count), peers_(64) { + explicit PartsHelper(size_t parts_count = 0) : parts_(parts_count), peers_(64) { peers_[0].is_valid = true; } using PartId = size_t; using PeerToken = size_t; + void init_parts_count(size_t 
parts_count) { + CHECK(parts_.empty()); + parts_.resize(parts_count); + } PeerToken register_self() { return self_token_; } @@ -123,6 +127,22 @@ struct PartsHelper { change_key(part_id, part->rnd, part->peers_count, 0, part->priority, part->priority); } + void on_self_part_not_ready(PartId part_id) { + auto peer = get_peer(self_token_); + if (!peer->ready_parts.set_zero(part_id)) { + return; + } + auto part = get_part(part_id); + CHECK(part->is_ready); + part->is_ready = false; + for (auto &peer : peers_) { + if (peer.ready_parts.get(part_id)) { + peer.want_download_count++; + } + } + change_key(part_id, part->rnd, 0, part->peers_count, part->priority, part->priority); + } + struct RarePart { PartId part_id; PeerId peer_id; diff --git a/storage/PeerActor.cpp b/storage/PeerActor.cpp index 51ec20d2..b2bf5a89 100644 --- a/storage/PeerActor.cpp +++ b/storage/PeerActor.cpp @@ -38,7 +38,7 @@ ton::ton_api::object_ptr to_ton_api(const PeerState return ton::ton_api::make_object(state.will_upload, state.want_download); } -PeerActor::PeerActor(td::unique_ptr callback, td::SharedState state) +PeerActor::PeerActor(td::unique_ptr callback, std::shared_ptr state) : callback_(std::move(callback)), state_(std::move(state)) { CHECK(callback_); } @@ -50,7 +50,6 @@ td::uint64 PeerActor::create_and_send_query(ArgsT &&... 
args) { td::uint64 PeerActor::send_query(td::BufferSlice query) { auto query_id = next_query_id_++; - //LOG(ERROR) << "send_query " << to_string(ton::fetch_tl_object(std::move(query), true).ok()); callback_->send_query(query_id, std::move(query)); return query_id; } @@ -64,8 +63,8 @@ void PeerActor::notify_node() { } void PeerActor::execute_query(td::BufferSlice query, td::Promise promise) { + on_pong(); TRY_RESULT_PROMISE(promise, f, ton::fetch_tl_object(std::move(query), true)); - //LOG(ERROR) << "execute_query " << to_string(f); ton::ton_api::downcast_call( *f, td::overloaded( [&](ton::ton_api::storage_ping &ping) { @@ -73,6 +72,7 @@ void PeerActor::execute_query(td::BufferSlice query, td::Promise r_answer) { } void PeerActor::on_pong() { - wait_pong_till_ = td::Timestamp::in(4); - state_.lock()->peer_online_ = true; + wait_pong_till_ = td::Timestamp::in(10); + state_->peer_online_ = true; notify_node(); } void PeerActor::on_update_result(td::Result r_answer) { update_query_id_ = {}; if (r_answer.is_ok()) { - peer_is_inited_ = true; - have_pieces_list_.clear(); + if (!peer_is_inited_) { + peer_init_offset_ += UPDATE_INIT_BLOCK_SIZE; + if (peer_init_offset_ >= have_pieces_.as_slice().size()) { + peer_is_inited_ = true; + } + } + } else { + have_pieces_list_.insert(have_pieces_list_.end(), sent_have_pieces_list_.begin(), sent_have_pieces_list_.end()); } + sent_have_pieces_list_.clear(); } void PeerActor::on_get_piece_result(PartId piece_id, td::Result r_answer) { - auto state = state_.lock(); - auto it = state->node_queries_.find(piece_id); - if (it == state->node_queries_.end()) { - LOG(ERROR) << "???"; - return; - } //TODO: handle errors ??? 
- it->second = [&]() -> td::Result { + auto res = [&]() -> td::Result { TRY_RESULT(slice, std::move(r_answer)); TRY_RESULT(piece, ton::fetch_result(slice.as_slice())); PeerState::Part res; @@ -114,6 +115,7 @@ void PeerActor::on_get_piece_result(PartId piece_id, td::Result res.proof = std::move(piece->proof_); return std::move(res); }(); + state_->node_queries_results_.add_element(std::make_pair(piece_id, std::move(res))); notify_node(); } @@ -123,10 +125,26 @@ void PeerActor::on_update_state_result(td::Result r_answer) { } } +void PeerActor::on_get_info_result(td::Result r_answer) { + get_info_query_id_ = {}; + next_get_info_at_ = td::Timestamp::in(5.0); + alarm_timestamp().relax(next_get_info_at_); + if (r_answer.is_error()) { + return; + } + auto R = fetch_tl_object(r_answer.move_as_ok(), true); + if (R.is_error()) { + return; + } + td::BufferSlice data = std::move(R.ok_ref()->data_); + if (!data.empty() && !state_->torrent_info_ready_) { + state_->torrent_info_response_callback_(std::move(data)); + } +} + void PeerActor::on_query_result(td::uint64 query_id, td::Result r_answer) { if (r_answer.is_ok()) { on_pong(); - state_.lock()->download.add(r_answer.ok().size(), td::Timestamp::now()); } if (ping_query_id_ && ping_query_id_.value() == query_id) { on_ping_result(std::move(r_answer)); @@ -134,11 +152,14 @@ void PeerActor::on_query_result(td::uint64 query_id, td::Result on_update_result(std::move(r_answer)); } else if (update_state_query_.query_id && update_state_query_.query_id.value() == query_id) { on_update_state_result(std::move(r_answer)); + } else if (get_info_query_id_ && get_info_query_id_.value() == query_id) { + on_get_info_result(std::move(r_answer)); } else { for (auto &query_it : node_get_piece_) { if (query_it.second.query_id && query_it.second.query_id.value() == query_id) { on_get_piece_result(query_it.first, std::move(r_answer)); - query_it.second.query_id = {}; + node_get_piece_.erase(query_it.first); + break; } } } @@ -151,8 +172,8 @@ void 
PeerActor::start_up() { node_session_id_ = td::Random::secure_uint64(); - auto state = state_.lock(); - state->peer = actor_id(this); + state_->peer = actor_id(this); + state_->peer_ready_ = true; notify_node(); schedule_loop(); @@ -165,6 +186,7 @@ void PeerActor::loop() { loop_update_init(); loop_update_state(); loop_update_pieces(); + loop_get_torrent_info(); loop_node_get_piece(); loop_peer_get_piece(); @@ -175,8 +197,8 @@ void PeerActor::loop() { void PeerActor::loop_pong() { if (wait_pong_till_ && wait_pong_till_.is_in_past()) { wait_pong_till_ = {}; - LOG(INFO) << "Disconnected"; - state_.lock()->peer_online_ = false; + LOG(DEBUG) << "Disconnected from peer"; + state_->peer_online_ = false; notify_node(); } alarm_timestamp().relax(wait_pong_till_); @@ -203,25 +225,24 @@ td::BufferSlice PeerActor::create_update_query(ton::tl_object_ptrnode_state_.load(); + auto s = have_pieces_.as_slice(); + if (s.size() <= peer_init_offset_) { + peer_is_inited_ = true; + return; + } + s = s.substr(peer_init_offset_, UPDATE_INIT_BLOCK_SIZE); auto query = create_update_query(ton::create_tl_object( - td::BufferSlice(have_pieces_.as_slice()), to_ton_api(state->node_state_))); + td::BufferSlice(s), (int)peer_init_offset_, to_ton_api(node_state))); // take care about update_state_query initial state - update_state_query_.state = state->node_state_; + update_state_query_.state = node_state; update_state_query_.query_id = 0; update_query_id_ = send_query(std::move(query)); @@ -232,9 +253,9 @@ void PeerActor::loop_update_state() { return; } - auto state = state_.lock(); - if (!(update_state_query_.state == state->node_state_)) { - update_state_query_.state = state->node_state_; + auto node_state = state_->node_state_.load(); + if (!(update_state_query_.state == node_state)) { + update_state_query_.state = node_state; update_state_query_.query_id = {}; } @@ -248,49 +269,45 @@ void PeerActor::loop_update_state() { } void PeerActor::update_have_pieces() { - auto state = state_.lock(); - 
have_pieces_list_.insert(have_pieces_list_.end(), state->node_ready_parts_.begin(), state->node_ready_parts_.end()); - for (auto piece_id : state->node_ready_parts_) { + auto node_ready_parts = state_->node_ready_parts_.read(); + for (auto piece_id : node_ready_parts) { + if (piece_id < peer_init_offset_ + UPDATE_INIT_BLOCK_SIZE) { + have_pieces_list_.push_back(piece_id); + } have_pieces_.set_one(piece_id); } - state->node_ready_parts_.clear(); } void PeerActor::loop_update_pieces() { - if (update_query_id_) { - return; - } - - if (!peer_is_inited_) { + if (update_query_id_ || !peer_is_inited_) { return; } update_have_pieces(); if (!have_pieces_list_.empty()) { + size_t count = std::min(have_pieces_list_.size(), 1500); + sent_have_pieces_list_.assign(have_pieces_list_.end() - count, have_pieces_list_.end()); + have_pieces_list_.erase(have_pieces_list_.end() - count, have_pieces_list_.end()); auto query = create_update_query(ton::create_tl_object( - td::transform(have_pieces_list_, [](auto x) { return static_cast(x); }))); + td::transform(sent_have_pieces_list_, [](auto x) { return static_cast(x); }))); update_query_id_ = send_query(std::move(query)); } } -void PeerActor::loop_node_get_piece() { - auto state = state_.lock(); - - for (auto it = node_get_piece_.begin(); it != node_get_piece_.end();) { - auto other_it = state->node_queries_.find(it->first); - if (other_it == state->node_queries_.end() || other_it->second) { - it = node_get_piece_.erase(it); - } else { - it++; - } +void PeerActor::loop_get_torrent_info() { + if (get_info_query_id_ || state_->torrent_info_ready_) { + return; } + if (next_get_info_at_ && !next_get_info_at_.is_in_past()) { + return; + } + get_info_query_id_ = create_and_send_query(); +} - for (auto &query_it : state->node_queries_) { - if (query_it.second) { - continue; - } - node_get_piece_.emplace(query_it.first, NodePieceQuery{}); +void PeerActor::loop_node_get_piece() { + for (auto part : state_->node_queries_.read()) { + 
node_get_piece_.emplace(part, NodePieceQuery{}); } for (auto &query_it : node_get_piece_) { @@ -304,40 +321,29 @@ void PeerActor::loop_node_get_piece() { } void PeerActor::loop_peer_get_piece() { - auto state = state_.lock(); - // process answers - for (auto &it : state->peer_queries_) { - if (!it.second) { - continue; - } - auto promise_it = peer_get_piece_.find(it.first); + for (auto &p : state_->peer_queries_results_.read()) { + state_->peer_queries_active_.erase(p.first); + auto promise_it = peer_get_piece_.find(p.first); if (promise_it == peer_get_piece_.end()) { continue; } - promise_it->second.promise.set_result(it.second.unwrap().move_map([](PeerState::Part part) { + promise_it->second.promise.set_result(p.second.move_map([](PeerState::Part part) { return ton::create_serialize_tl_object(std::move(part.proof), std::move(part.data)); })); peer_get_piece_.erase(promise_it); - } - - // erase unneeded queries - for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) { - if (peer_get_piece_.count(it->first) == 0) { - it = state->peer_queries_.erase(it); - notify_node(); - } else { - it++; - } + notify_node(); } // create queries + std::vector new_peer_queries; for (auto &query_it : peer_get_piece_) { - auto res = state->peer_queries_.emplace(query_it.first, td::optional>()); - if (res.second) { + if (state_->peer_queries_active_.insert(query_it.first).second) { + new_peer_queries.push_back(query_it.first); notify_node(); } } + state_->peer_queries_.add_elements(std::move(new_peer_queries)); } void PeerActor::loop_notify_node() { @@ -345,13 +351,14 @@ void PeerActor::loop_notify_node() { return; } need_notify_node_ = false; - state_.lock()->notify_node(); + state_->notify_node(); } void PeerActor::execute_ping(td::uint64 session_id, td::Promise promise) { if (!peer_session_id_ || peer_session_id_.value() != session_id) { peer_session_id_ = session_id; peer_is_inited_ = false; + peer_init_offset_ = 0; update_query_id_ = {}; 
update_state_query_.query_id = {}; @@ -369,11 +376,11 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, promise.set_value(ton::create_serialize_tl_object()); - auto state = state_.lock(); + std::vector new_peer_ready_parts; auto add_piece = [&](PartId id) { if (!peer_have_pieces_.get(id)) { peer_have_pieces_.set_one(id); - state->peer_ready_parts_.push_back(id); + new_peer_ready_parts.push_back(id); notify_node(); } }; @@ -383,15 +390,15 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, if (peer_seqno_ >= seqno) { return; } - if (state->peer_state_ && state->peer_state_.value() == peer_state) { + if (state_->peer_state_ready_ && state_->peer_state_.load() == peer_state) { return; } peer_seqno_ = seqno; - state->peer_state_ = peer_state; + state_->peer_state_.exchange(peer_state); + state_->peer_state_ready_ = true; notify_node(); }; - //LOG(ERROR) << "Got " << to_string(add_update); downcast_call(*add_update.update_, td::overloaded( [&](ton::ton_api::storage_updateHavePieces &have_pieces) { @@ -404,16 +411,24 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, update_peer_state(from_ton_api(*init.state_)); td::Bitset new_bitset; new_bitset.set_raw(init.have_pieces_.as_slice().str()); + size_t offset = init.have_pieces_offset_ * 8; for (auto size = new_bitset.size(), i = size_t(0); i < size; i++) { if (new_bitset.get(i)) { - add_piece(static_cast(i)); + add_piece(static_cast(offset + i)); } } })); + state_->peer_ready_parts_.add_elements(std::move(new_peer_ready_parts)); } void PeerActor::execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise promise) { PartId piece_id = get_piece.piece_id_; peer_get_piece_[piece_id] = {std::move(promise)}; } + +void PeerActor::execute_get_torrent_info(td::Promise promise) { + td::BufferSlice result = create_serialize_tl_object( + state_->torrent_info_ready_ ? 
state_->torrent_info_str_->clone() : td::BufferSlice()); + promise.set_result(std::move(result)); +} } // namespace ton diff --git a/storage/PeerActor.h b/storage/PeerActor.h index cc72496d..fd34bc88 100644 --- a/storage/PeerActor.h +++ b/storage/PeerActor.h @@ -21,7 +21,6 @@ #include "Bitset.h" #include "PeerState.h" -#include "SharedState.h" #include "td/utils/optional.h" @@ -38,14 +37,14 @@ class PeerActor : public td::actor::Actor { virtual void send_query(td::uint64 query_id, td::BufferSlice query) = 0; }; - PeerActor(td::unique_ptr callback, td::SharedState state); + PeerActor(td::unique_ptr callback, std::shared_ptr state); void execute_query(td::BufferSlice query, td::Promise promise); void on_query_result(td::uint64 query_id, td::Result r_answer); private: td::unique_ptr callback_; - td::SharedState state_; + std::shared_ptr state_; bool need_notify_node_{false}; td::uint64 next_query_id_{0}; @@ -53,7 +52,9 @@ class PeerActor : public td::actor::Actor { // ping td::Timestamp next_ping_at_; td::optional ping_query_id_; + td::optional get_info_query_id_; td::Timestamp wait_pong_till_; + td::Timestamp next_get_info_at_; // startSession td::uint64 node_session_id_; @@ -63,15 +64,17 @@ class PeerActor : public td::actor::Actor { td::optional peer_session_id_; td::optional update_query_id_; bool peer_is_inited_{false}; + size_t peer_init_offset_{0}; td::uint32 node_seqno_{0}; td::Bitset have_pieces_; std::vector have_pieces_list_; + std::vector sent_have_pieces_list_; td::uint32 peer_seqno_{0}; // update state struct UpdateState { td::optional query_id; - PeerState::State state; + PeerState::State state{false, false}; }; UpdateState update_state_query_; @@ -102,6 +105,7 @@ class PeerActor : public td::actor::Actor { void loop_update_init(); void loop_update_pieces(); void update_have_pieces(); + void loop_get_torrent_info(); void loop_update_state(); @@ -112,14 +116,14 @@ class PeerActor : public td::actor::Actor { void loop_peer_get_piece(); void 
execute_add_update(ton::ton_api::storage_addUpdate &add_update, td::Promise promise); - void execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise promise); + void execute_get_torrent_info(td::Promise promise); void on_update_result(td::Result r_answer); void on_get_piece_result(PartId piece_id, td::Result r_answer); - void on_update_state_result(td::Result r_answer); + void on_get_info_result(td::Result r_answer); template td::uint64 create_and_send_query(ArgsT &&... args); @@ -127,5 +131,7 @@ class PeerActor : public td::actor::Actor { void schedule_loop(); void notify_node(); + + static const size_t UPDATE_INIT_BLOCK_SIZE = 6000; }; } // namespace ton diff --git a/storage/PeerManager.h b/storage/PeerManager.h new file mode 100644 index 00000000..52297ac5 --- /dev/null +++ b/storage/PeerManager.h @@ -0,0 +1,275 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#include "rldp2/rldp.h" +#include "td/actor/actor.h" +#include "overlay/overlay.h" +#include "NodeActor.h" + +namespace ton_rldp = ton::rldp2; + +class PeerManager : public td::actor::Actor { + public: + PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id, bool client_mode, + td::actor::ActorId overlays, td::actor::ActorId adnl, + td::actor::ActorId rldp) + : overlay_id_(std::move(overlay_id)) + , client_mode_(client_mode) + , overlays_(std::move(overlays)) + , adnl_(std::move(adnl)) + , rldp_(std::move(rldp)) { + CHECK(register_adnl_id(adnl_id) == 1); + } + void start_up() override { + } + void tear_down() override { + for (const auto& p : subscribed_peers_) { + if (p.second > 0) { + auto adnl_id = peer_to_andl(p.first); + if (adnl_id.is_ok()) { + send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id.move_as_ok(), + overlay_id_.compute_short_id()); + } + } + } + } + void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + TRY_RESULT_PROMISE(promise, dst_id, peer_to_andl(dst)); + send_closure(overlays_, &ton::overlay::Overlays::send_query_via, dst_id, src_id, overlay_id_.compute_short_id(), "", + std::move(promise), td::Timestamp::in(10), std::move(query), 1 << 25, rldp_); + } + + void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, + td::Promise promise) { + auto src_id = register_adnl_id(src); + auto dst_id = register_adnl_id(dst); + auto it = peers_.find(std::make_pair(dst_id, src_id)); + if (it == peers_.end()) { + auto node_it = nodes_.find(dst_id); + if (node_it == nodes_.end()) { + LOG(ERROR) << "Unknown query destination"; + promise.set_error(td::Status::Error("Unknown query destination")); + return; + } + if (!node_it->second.is_alive()) { + LOG(ERROR) << "Expired query destination"; + promise.set_error(td::Status::Error("Unknown query 
destination")); + return; + } + send_closure(node_it->second, &ton::NodeActor::start_peer, src_id, + [promise = std::move(promise), + data = std::move(data)](td::Result> r_peer) mutable { + TRY_RESULT_PROMISE(promise, peer, std::move(r_peer)); + send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); + }); + return; + } + send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); + } + + void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { + peers_[std::make_pair(src, dst)] = std::move(peer); + register_src(src, [](td::Result res) { res.ensure(); }); + } + + void register_node(ton::PeerId src, td::actor::ActorId node) { + nodes_[src] = std::move(node); + register_src(src, [](td::Result res) { res.ensure(); }); + } + + void unregister_node(ton::PeerId src, td::actor::ActorId node) { + auto it = nodes_.find(src); + CHECK(it != nodes_.end()); + if (it->second == node) { + nodes_.erase(it); + } + unregister_src(src, [](td::Result res) { res.ensure(); }); + } + + void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { + auto it = peers_.find(std::make_pair(src, dst)); + CHECK(it != peers_.end()); + if (it->second == peer) { + peers_.erase(it); + } + unregister_src(src, [](td::Result res) { res.ensure(); }); + } + + void unregister_src(ton::PeerId src, td::Promise promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + if (--subscribed_peers_[src] == 0) { + subscribed_peers_.erase(src); + send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, src_id, overlay_id_.compute_short_id()); + } + promise.set_value({}); + } + void register_src(ton::PeerId src, td::Promise promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + if (subscribed_peers_[src]++ == 0) { + auto rules = ton::overlay::OverlayPrivacyRules{}; + class Callback : public ton::overlay::Overlays::Callback { + public: + explicit 
Callback(td::actor::ActorId peer_manager, ton::adnl::AdnlNodeIdShort dst) + : peer_manager_(std::move(peer_manager)), dst_(dst) { + } + void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, + td::BufferSlice data) override { + } + void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, + td::BufferSlice data, td::Promise promise) override { + td::actor::send_closure(peer_manager_, &PeerManager::execute_query, src, dst_, std::move(data), + std::move(promise)); + } + void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id, + td::BufferSlice data) override { + } + + private: + td::actor::ActorId peer_manager_; + ton::adnl::AdnlNodeIdShort dst_; + }; + send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay_ex, src_id, overlay_id_.clone(), + std::make_unique(actor_id(this), src_id), rules, R"({ "type": "storage" })", + !client_mode_); + } + promise.set_value({}); + } + + td::Result peer_to_andl(ton::PeerId id) { + if (id <= 0 || id > adnl_ids_.size()) { + return td::Status::Error(PSLICE() << "Invalid peer id " << id); + } + return adnl_ids_[id - 1]; + } + + ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) { + auto it = adnl_to_peer_id_.emplace(id, next_peer_id_); + if (it.second) { + adnl_ids_.push_back(id); + next_peer_id_++; + } + return it.first->second; + } + + void get_peers(ton::PeerId src, td::Promise> promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, src_id, overlay_id_.compute_short_id(), + 30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers)); + } + + void get_peer_info(ton::PeerId src, ton::PeerId peer, td::Promise> promise) { + TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); + TRY_RESULT_PROMISE(promise, peer_id, peer_to_andl(peer)); + td::actor::send_closure( + adnl_, 
&ton::adnl::Adnl::get_conn_ip_str, src_id, peer_id, + promise.wrap([peer_id](std::string s) { return std::make_pair(peer_id.bits256_value(), std::move(s)); })); + } + + static td::unique_ptr create_callback(td::actor::ActorId peer_manager) { + class Context : public ton::NodeActor::NodeCallback { + public: + Context(td::actor::ActorId peer_manager) : peer_manager_(peer_manager) { + } + void get_peers(ton::PeerId src, td::Promise> promise) override { + send_closure(peer_manager_, &PeerManager::get_peers, src, std::move(promise)); + } + void register_self(td::actor::ActorId self) override { + CHECK(self_.empty()); + self_ = self; + send_closure(peer_manager_, &PeerManager::register_node, 1, self_); + } + ~Context() override { + if (!self_.empty()) { + send_closure(peer_manager_, &PeerManager::unregister_node, 1, self_); + } + } + td::actor::ActorOwn create_peer(ton::PeerId self_id, ton::PeerId peer_id, + std::shared_ptr state) override { + CHECK(self_id == 1); + class PeerCallback : public ton::PeerActor::Callback { + public: + PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId peer_manager) + : self_id_(self_id), peer_id_(peer_id), peer_manager_(std::move(peer_manager)) { + } + void register_self(td::actor::ActorId self) override { + CHECK(self_.empty()); + self_ = std::move(self); + send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_); + } + void send_query(td::uint64 query_id, td::BufferSlice query) override { + send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query), + promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id)); + } + + ~PeerCallback() { + if (!self_.empty()) { + send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_); + } + } + + private: + td::actor::ActorId self_; + ton::PeerId self_id_; + ton::PeerId peer_id_; + td::actor::ActorId peer_manager_; + }; + return td::actor::create_actor(PSLICE() << "PeerActor " << 
peer_id, + td::make_unique(self_id, peer_id, peer_manager_), + std::move(state)); + } + + void get_peer_info(ton::PeerId src, ton::PeerId peer, + td::Promise> promise) override { + td::actor::send_closure(peer_manager_, &PeerManager::get_peer_info, src, peer, std::move(promise)); + } + + private: + td::actor::ActorId peer_manager_; + std::vector peers_; + td::actor::ActorId self_; + }; + return td::make_unique(std::move(peer_manager)); + } + + private: + ton::overlay::OverlayIdFull overlay_id_; + bool client_mode_ = false; + td::actor::ActorId overlays_; + td::actor::ActorId adnl_; + td::actor::ActorId rldp_; + + std::map, td::actor::ActorId> peers_; + std::map> nodes_; + ton::PeerId next_peer_id_{1}; + std::map adnl_to_peer_id_; + std::vector adnl_ids_; + + std::map subscribed_peers_; + + void got_overlay_random_peers(td::Result> r_peers, + td::Promise> promise) { + TRY_RESULT_PROMISE(promise, peers, std::move(r_peers)); + + std::vector res; + for (auto peer : peers) { + res.push_back(register_adnl_id(peer)); + } + + promise.set_value(std::move(res)); + } +}; diff --git a/storage/PeerState.cpp b/storage/PeerState.cpp index 35d27e43..a760024f 100644 --- a/storage/PeerState.cpp +++ b/storage/PeerState.cpp @@ -27,7 +27,7 @@ void PeerState::notify_node() { } void PeerState::notify_peer() { - if (peer.empty()) { + if (!peer_ready_) { return; } td::actor::send_signals_later(peer, td::actor::ActorSignals::wakeup()); diff --git a/storage/PeerState.h b/storage/PeerState.h index 3a3595f2..c578b544 100644 --- a/storage/PeerState.h +++ b/storage/PeerState.h @@ -25,17 +25,69 @@ #include "td/actor/actor.h" #include - -#include "LoadSpeed.h" +#include namespace ton { using PeerId = td::uint64; using PartId = td::uint32; +// Concurrent buffer for messages with one writer and one reader +// Reader reads all existing messages at once +// TODO: Use some better algorithm here, or maybe a concurrent queue +template +class MessageBuffer { + public: + MessageBuffer() = default; + 
MessageBuffer(const MessageBuffer&) = delete; + MessageBuffer& operator=(const MessageBuffer&) = delete; + ~MessageBuffer() { + delete ptr_.load(); + } + + void add_element(T x) { + std::vector* vec = ptr_.exchange(nullptr); + if (vec == nullptr) { + vec = new std::vector(); + } + vec->push_back(std::move(x)); + CHECK(ptr_.exchange(vec) == nullptr); + } + + void add_elements(std::vector elements) { + if (elements.empty()) { + return; + } + std::vector* vec = ptr_.exchange(nullptr); + if (vec == nullptr) { + vec = new std::vector(std::move(elements)); + } else { + for (auto& x : elements) { + vec->push_back(std::move(x)); + } + } + CHECK(ptr_.exchange(vec) == nullptr); + } + + std::vector read() { + std::vector* vec = ptr_.exchange(nullptr); + std::vector result; + if (vec != nullptr) { + result = std::move(*vec); + delete vec; + } + return result; + } + private: + std::atomic*> ptr_{nullptr}; +}; + struct PeerState { + explicit PeerState(td::actor::ActorId<> node) : node(std::move(node)) { + } + struct State { - bool will_upload{false}; - bool want_download{false}; + bool will_upload; + bool want_download; auto key() const { return std::tie(will_upload, want_download); } @@ -43,31 +95,39 @@ struct PeerState { return key() == other.key(); } }; - State node_state_; - td::optional peer_state_; - bool peer_online_{false}; + // Thread-safe fields + std::atomic node_state_{State{false, false}}; + std::atomic_bool peer_state_ready_{false}; + std::atomic peer_state_{State{false, false}}; + std::atomic_bool peer_online_{false}; struct Part { td::BufferSlice proof; td::BufferSlice data; }; - std::map>> node_queries_; - std::map>> peer_queries_; + + std::set node_queries_active_; // Node only + MessageBuffer node_queries_; // Node -> Peer + MessageBuffer>> node_queries_results_; // Peer -> Node + + std::set peer_queries_active_; // Peer only + MessageBuffer peer_queries_; // Peer -> Node + MessageBuffer>> peer_queries_results_; // Node -> Peer // Peer -> Node - // update are 
added to this vector, so reader will be able to process all changes - std::vector peer_ready_parts_; + MessageBuffer peer_ready_parts_; + // Node -> Peer + MessageBuffer node_ready_parts_; // Node -> Peer - // writer writes all new parts to this vector. This state will be eventually synchornized with a peer - std::vector node_ready_parts_; + std::atomic_bool torrent_info_ready_{false}; + std::shared_ptr torrent_info_str_; + std::function torrent_info_response_callback_; - td::actor::ActorId<> node; + const td::actor::ActorId<> node; + std::atomic_bool peer_ready_{false}; td::actor::ActorId<> peer; - LoadSpeed upload; - LoadSpeed download; - void notify_node(); void notify_peer(); }; diff --git a/storage/SharedState.h b/storage/SharedState.h deleted file mode 100644 index 75b02e91..00000000 --- a/storage/SharedState.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - This file is part of TON Blockchain Library. - - TON Blockchain Library is free software: you can redistribute it and/or modify - it under the terms of the GNU Lesser General Public License as published by - the Free Software Foundation, either version 2 of the License, or - (at your option) any later version. - - TON Blockchain Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public License - along with TON Blockchain Library. If not, see . 
- - Copyright 2017-2020 Telegram Systems LLP -*/ - -#pragma once - -#include -#include "td/utils/MovableValue.h" -#include - -namespace td { -template -class SharedState { - public: - friend class Guard; - class Guard { - public: - Guard(Guard &&) = default; - Guard(SharedState *self) : self(self) { - CHECK(!self->data_->is_locked.exchange(true)); - } - ~Guard() { - if (self.get()) { - CHECK(self.get()->data_->is_locked.exchange(false)); - } - } - T *get() { - return &self.get()->data_->data; - } - T *operator->() { - return get(); - } - - private: - td::MovableValue *> self; - }; - - auto lock() { - return Guard{this}; - } - auto unsafe() { - return &data_->data; - } - - private: - struct Data { - std::atomic is_locked{}; - T data; - }; - std::shared_ptr data_{std::make_shared()}; -}; -} // namespace td diff --git a/storage/Torrent.cpp b/storage/Torrent.cpp index 2012d053..ce0a9d0b 100644 --- a/storage/Torrent.cpp +++ b/storage/Torrent.cpp @@ -23,29 +23,30 @@ #include "td/utils/crypto.h" #include "td/utils/port/Stat.h" #include "td/utils/tl_helpers.h" +#include "td/utils/port/path.h" namespace ton { -Torrent::Torrent(TorrentMeta meta) - : info_(meta.info) - , merkle_tree_(info_.pieces_count(), info_.root_hash) - , piece_is_ready_(info_.pieces_count(), false) { - not_ready_piece_count_ = piece_is_ready_.size(); - header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size; - not_ready_pending_piece_count_ = header_pieces_count_; - - if (meta.header) { - set_header(meta.header.unwrap()); - } else { - header_str_ = td::BufferSlice(info_.header_size); - } - if (meta.root_proof.not_null()) { - merkle_tree_.add_proof(meta.root_proof); +td::Result Torrent::open(Options options, td::Bits256 hash) { + Torrent res(hash); + if (!options.in_memory) { + if (options.root_dir.empty()) { + options.root_dir = "."; + } + res.set_root_dir(options.root_dir); } + return std::move(res); } td::Result Torrent::open(Options options, TorrentMeta meta) { - Torrent 
res(std::move(meta)); + Torrent res(td::Bits256(meta.info.get_hash())); + TRY_STATUS(res.init_info(std::move(meta.info))); + if (meta.header) { + TRY_STATUS(res.set_header(meta.header.unwrap())); + } + if (meta.root_proof.not_null()) { + TRY_STATUS(res.merkle_tree_.add_proof(meta.root_proof)); + } if (!options.in_memory) { if (options.root_dir.empty()) { options.root_dir = "."; @@ -64,6 +65,7 @@ td::Result Torrent::open(Options options, td::Slice meta_str) { } const Torrent::Info &Torrent::get_info() const { + CHECK(inited_info_); return info_; } @@ -103,6 +105,9 @@ td::Status Torrent::iterate_piece(Info::PieceInfo piece, F &&f) { } bool Torrent::is_piece_ready(td::uint64 piece_i) const { + if (!inited_info_) { + return false; + } CHECK(piece_i < info_.pieces_count()); return piece_is_ready_[piece_i]; } @@ -133,7 +138,8 @@ Torrent::PartsRange Torrent::get_file_parts_range(size_t i) { return res; } -Torrent::PartsRange Torrent::get_header_parts_range() { +Torrent::PartsRange Torrent::get_header_parts_range() const { + CHECK(inited_info_); PartsRange res; res.begin = 0; res.end = header_pieces_count_; @@ -184,11 +190,14 @@ std::string Torrent::get_stats_str() const { } void Torrent::validate() { - CHECK(header_); + if (!inited_info_ || !header_) { + return; + } std::fill(piece_is_ready_.begin(), piece_is_ready_.end(), false); not_ready_piece_count_ = info_.pieces_count(); + included_ready_size_ = 0; for (auto &chunk : chunks_) { chunk.ready_size = 0; if (root_dir_) { @@ -200,31 +209,25 @@ void Torrent::validate() { } std::vector hashes; - std::vector chunks; + std::vector> pieces; auto flush = [&] { - td::Bitset bitmask; - merkle_tree_.add_chunks(chunks, bitmask); - for (size_t i = 0; i < chunks.size(); i++) { - if (!bitmask.get(i)) { - continue; - } - - auto piece_i = chunks[i].index; + for (size_t piece_i : merkle_tree_.add_pieces(std::move(pieces))) { auto piece = info_.get_piece_info(piece_i); iterate_piece(piece, [&](auto it, auto info) { it->ready_size += 
info.size; + if (!it->excluded) { + included_ready_size_ += info.size; + } return td::Status::OK(); }); piece_is_ready_[piece_i] = true; ready_parts_count_++; - CHECK(not_ready_piece_count_); not_ready_piece_count_--; } - hashes.clear(); - chunks.clear(); + pieces.clear(); }; td::BufferSlice buf(info_.piece_size); @@ -246,24 +249,23 @@ void Torrent::validate() { auto dest = buf.as_slice().truncate(info.size); TRY_STATUS(it->get_piece(dest, info.chunk_offset, &cache)); sha256.feed(dest); - //LOG(ERROR) << dest; return td::Status::OK(); }); if (is_ok.is_error()) { LOG_IF(ERROR, !skipped) << "Failed: " << is_ok; - LOG(ERROR) << "Failed: " << is_ok; continue; } - MerkleTree::Chunk chunk; - chunk.index = piece_i; - sha256.extract(chunk.hash.as_slice()); - - chunks.push_back(chunk); + td::Bits256 hash; + sha256.extract(hash.as_slice()); + pieces.emplace_back(piece_i, hash); } flush(); } td::Result Torrent::get_piece_data(td::uint64 piece_i) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } CHECK(piece_i < info_.pieces_count()); if (!piece_is_ready_[piece_i]) { return td::Status::Error("Piece is not ready"); @@ -272,6 +274,10 @@ td::Result Torrent::get_piece_data(td::uint64 piece_i) { if (it != pending_pieces_.end()) { return it->second; } + auto it2 = in_memory_pieces_.find(piece_i); + if (it2 != in_memory_pieces_.end()) { + return it2->second.data; + } auto piece = info_.get_piece_info(piece_i); std::string res(piece.size, '\0'); @@ -282,68 +288,114 @@ td::Result Torrent::get_piece_data(td::uint64 piece_i) { } td::Result> Torrent::get_piece_proof(td::uint64 piece_i) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } CHECK(piece_i < info_.pieces_count()); return merkle_tree_.gen_proof(piece_i, piece_i); } td::Status Torrent::add_piece(td::uint64 piece_i, td::Slice data, td::Ref proof) { - TRY_STATUS(merkle_tree_.add_proof(proof)); - //LOG(ERROR) << "Add piece #" << piece_i; + if (fatal_error_.is_error()) 
{ + return fatal_error_.clone().move_as_error_prefix("Fatal error: "); + } + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } + if (!proof.is_null()) { + TRY_STATUS(merkle_tree_.add_proof(proof)); + } CHECK(piece_i < info_.pieces_count()); if (piece_is_ready_[piece_i]) { return td::Status::OK(); } + td::Bits256 hash; + td::sha256(data, hash.as_slice()); + TRY_RESULT(expected_hash, merkle_tree_.get_piece_hash(piece_i)); + if (expected_hash != hash) { + return td::Status::Error("Hash mismatch"); + } piece_is_ready_[piece_i] = true; ready_parts_count_++; - ton::MerkleTree::Chunk chunk; - chunk.index = piece_i; - td::sha256(data, chunk.hash.as_slice()); - TRY_STATUS(merkle_tree_.try_add_chunks({chunk})); - if (chunks_.empty()) { - return add_header_piece(piece_i, data); + if (chunks_.empty() || !enabled_wirte_to_files_) { + return add_pending_piece(piece_i, data); } - return add_validated_piece(piece_i, data); } -td::Status Torrent::add_header_piece(td::uint64 piece_i, td::Slice data) { +td::Status Torrent::add_proof(td::Ref proof) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } + return merkle_tree_.add_proof(std::move(proof)); +} + +td::Status Torrent::add_pending_piece(td::uint64 piece_i, td::Slice data) { pending_pieces_[piece_i] = data.str(); if (piece_i < header_pieces_count_) { - //LOG(ERROR) << "Add header piece #" << piece_i; auto piece = info_.get_piece_info(piece_i); auto dest = header_str_.as_slice().substr(piece.offset); data.truncate(dest.size()); dest.copy_from(data); not_ready_pending_piece_count_--; if (not_ready_pending_piece_count_ == 0) { - //LOG(ERROR) << "Got full header"; TorrentHeader header; - TRY_STATUS(td::unserialize(header, header_str_.as_slice())); // TODO: it is a fatal error - set_header(header); - for (auto &it : pending_pieces_) { - TRY_STATUS(add_validated_piece(it.first, it.second)); + auto S = td::unserialize(header, header_str_.as_slice()); + if (S.is_ok()) { + S = 
set_header(std::move(header)); + } + if (S.is_error()) { + S = S.move_as_error_prefix("Invalid torrent header: "); + fatal_error_ = S.clone(); + return S; + } + if (enabled_wirte_to_files_) { + add_pending_pieces(); } - pending_pieces_.clear(); } - } else { - LOG(ERROR) << "PENDING"; } return td::Status::OK(); } -std::string Torrent::get_chunk_path(td::Slice name) { +void Torrent::enable_write_to_files() { + if (enabled_wirte_to_files_) { + return; + } + enabled_wirte_to_files_ = true; + if (header_) { + add_pending_pieces(); + } +} + +void Torrent::add_pending_pieces() { + for (auto &p : pending_pieces_) { + td::Status S = add_validated_piece(p.first, std::move(p.second)); + if (S.is_error()) { + LOG(WARNING) << "Failed to add pending piece #" << p.first << ": " << S; + } + } + pending_pieces_.clear(); +} + +std::string Torrent::get_chunk_path(td::Slice name) const { return PSTRING() << root_dir_.value() << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH << name; } +std::string Torrent::get_file_path(size_t i) const { + return get_chunk_path(chunks_.at(i + 1).name); +} + td::Status Torrent::init_chunk_data(ChunkState &chunk) { if (chunk.data) { return td::Status::OK(); } if (root_dir_) { - TRY_RESULT(data, td::FileNoCacheBlobView::create(get_chunk_path(chunk.name), chunk.size, true)); + std::string path = get_chunk_path(chunk.name); + TRY_STATUS(td::mkpath(path)); + TRY_RESULT(data, td::FileNoCacheBlobView::create(path, chunk.size, true)); chunk.data = std::move(data); } else { chunk.data = td::BufferSliceBlobView::create(td::BufferSlice(chunk.size)); @@ -355,22 +407,41 @@ td::Status Torrent::add_validated_piece(td::uint64 piece_i, td::Slice data) { CHECK(!chunks_.empty()); auto piece = info_.get_piece_info(piece_i); + std::set excluded; TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) { + if (it->excluded) { + excluded.insert(it - chunks_.begin()); + return td::Status::OK(); + } TRY_STATUS(init_chunk_data(*it)); - return 
it->add_piece(data.substr(info.piece_offset, info.size), info.chunk_offset); + TRY_STATUS(it->write_piece(data.substr(info.piece_offset, info.size), info.chunk_offset)); + return td::Status::OK(); + })); + TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) { + if (!it->excluded) { + it->ready_size += info.size; + included_ready_size_ += info.size; + } + return td::Status::OK(); })); piece_is_ready_[piece_i] = true; not_ready_piece_count_--; + if (!excluded.empty()) { + in_memory_pieces_[piece_i] = {data.str(), std::move(excluded)}; + } return td::Status::OK(); } bool Torrent::is_completed() const { - return not_ready_piece_count_ == 0; + return inited_info_ && enabled_wirte_to_files_ && included_ready_size_ == included_size_; } td::Result Torrent::read_file(td::Slice name) { + if (!inited_info_) { + return td::Status::Error("Torrent info not inited"); + } for (auto &chunk : chunks_) { if (chunk.name == name) { td::BufferSlice res(chunk.size); @@ -383,10 +454,12 @@ td::Result Torrent::read_file(td::Slice name) { Torrent::GetMetaOptions::GetMetaOptions() = default; std::string Torrent::get_meta_str(const GetMetaOptions &options) const { + CHECK(inited_info_); return get_meta(options).serialize(); } TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const { + CHECK(inited_info_); TorrentMeta torrent_file; if (options.with_header) { torrent_file.header = header_; @@ -399,17 +472,36 @@ TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const { return torrent_file; } -Torrent::Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunks) - : info_(info) +Torrent::Torrent(td::Bits256 hash) : hash_(hash), inited_info_(false) { +} + +Torrent::Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunks, + std::string root_dir) + : hash_(info.get_hash()) + , inited_info_(true) + , info_(info) + , root_dir_(std::move(root_dir)) , header_(std::move(header)) + , enabled_wirte_to_files_(true) , 
merkle_tree_(std::move(tree)) , piece_is_ready_(info_.pieces_count(), true) , ready_parts_count_{info_.pieces_count()} - , chunks_(std::move(chunks)) { + , chunks_(std::move(chunks)) + , included_size_(info_.file_size) + , included_ready_size_(info_.file_size) { } -void Torrent::set_header(const TorrentHeader &header) { - header_ = header; +td::Status Torrent::set_header(TorrentHeader header) { + if (header_) { + return td::Status::OK(); + } + auto header_str = header.serialize(); + td::Bits256 header_hash; + td::sha256(header_str.as_slice(), header_hash.as_slice()); + if (header_hash != info_.header_hash) { + return td::Status::Error("Incorrect header hash"); + } + TRY_STATUS_PREFIX(header.validate(info_.file_size, info_.header_size), "Invalid torrent header: "); auto add_chunk = [&](td::Slice name, td::uint64 offset, td::uint64 size) { ChunkState chunk; chunk.name = name.str(); @@ -417,14 +509,17 @@ void Torrent::set_header(const TorrentHeader &header) { chunk.size = size; chunk.offset = offset; chunks_.push_back(std::move(chunk)); + included_size_ += size; }; - add_chunk("", 0, header.serialization_size()); - chunks_.back().data = td::BufferSliceBlobView::create(header.serialize()); + add_chunk("", 0, header_str.size()); + chunks_.back().data = td::BufferSliceBlobView::create(std::move(header_str)); for (size_t i = 0; i < header.files_count; i++) { auto l = header.get_data_begin(i); auto r = header.get_data_end(i); add_chunk(header.get_name(i), l, r - l); } + header_ = std::move(header); + return td::Status::OK(); } size_t Torrent::get_ready_parts_count() const { @@ -432,6 +527,7 @@ size_t Torrent::get_ready_parts_count() const { } std::vector Torrent::chunks_by_piece(td::uint64 piece_id) { + CHECK(inited_info_); std::vector res; auto piece = info_.get_piece_info(piece_id); auto is_ok = iterate_piece(piece, [&](auto it, auto info) { @@ -441,4 +537,165 @@ std::vector Torrent::chunks_by_piece(td::uint64 piece_id) { return res; } +td::Status Torrent::init_info(Info 
info) { + if (hash_ != info.get_hash()) { + return td::Status::Error("Hash mismatch"); + } + if (inited_info_) { + return td::Status::OK(); + } + auto S = info.validate(); + if (S.is_error()) { + S = S.move_as_error_prefix("Invalid torrent info: "); + fatal_error_ = S.clone(); + return S; + } + inited_info_ = true; + info_ = std::move(info); + merkle_tree_ = MerkleTree(info_.pieces_count(), info_.root_hash); + piece_is_ready_.resize(info_.pieces_count(), false); + not_ready_piece_count_ = piece_is_ready_.size(); + header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size; + not_ready_pending_piece_count_ = header_pieces_count_; + header_str_ = td::BufferSlice(info_.header_size); + return td::Status::OK(); +} + +void Torrent::set_file_excluded(size_t i, bool excluded) { + CHECK(header_); + CHECK(i + 1 < chunks_.size()); + if (!root_dir_) { + return; // All files are in-memory, nothing to do + } + size_t chunk_i = i + 1; + auto &chunk = chunks_[chunk_i]; + if (chunk.excluded == excluded) { + return; + } + if (excluded) { + included_size_ -= chunk.size; + included_ready_size_ -= chunk.ready_size; + } else { + included_size_ += chunk.size; + included_ready_size_ += chunk.ready_size; + } + chunk.excluded = excluded; + if (!enabled_wirte_to_files_ || excluded) { + return; + } + auto range = get_file_parts_range(i); + for (auto it = in_memory_pieces_.lower_bound(range.begin); it != in_memory_pieces_.end() && it->first < range.end;) { + if (!it->second.pending_chunks.count(chunk_i)) { + ++it; + continue; + } + auto piece_i = it->first; + auto piece = info_.get_piece_info(piece_i); + auto S = [&]() { + auto l = td::max(chunk.offset, piece.offset); + auto r = td::min(chunk.offset + chunk.size, piece.offset + piece.size); + TRY_STATUS(init_chunk_data(chunk)); + TRY_STATUS(chunk.write_piece(it->second.data.substr(l - piece.offset, r - l), l - chunk.offset)); + chunk.ready_size += r - l; + included_ready_size_ += r - l; + return td::Status::OK(); + 
}(); + if (S.is_error()) { + // Erase piece completely + piece_is_ready_[piece_i] = false; + not_ready_piece_count_++; + iterate_piece(piece, [&](auto it2, auto info) { + if (!it2->excluded) { + included_ready_size_ -= info.size; + } + if (!it2->excluded || !it->second.pending_chunks.count(it2 - chunks_.begin())) { + it2->ready_size -= info.size; + } + return td::Status::OK(); + }); + it = in_memory_pieces_.erase(it); + continue; + } + it->second.pending_chunks.erase(chunk_i); + if (it->second.pending_chunks.empty()) { + it = in_memory_pieces_.erase(it); + } else { + ++it; + } + } +} + +void Torrent::load_from_files(std::string files_path) { + CHECK(inited_header()); + std::vector> new_blobs; + new_blobs.push_back(td::BufferSliceBlobView::create(get_header().serialize())); + size_t files_count = get_files_count().unwrap(); + for (size_t i = 0; i < files_count; ++i) { + std::string new_path = PSTRING() << files_path << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH + << get_file_name(i); + auto R = td::FileNoCacheBlobView::create(new_path, get_file_size(i), false); + if (R.is_error() && files_count == 1) { + R = td::FileNoCacheBlobView::create(files_path, get_file_size(i), false); + } + if (R.is_error()) { + new_blobs.emplace_back(); + } else { + new_blobs.push_back(R.move_as_ok()); + } + } + auto load_new_piece = [&](size_t piece_i) -> td::Result { + auto piece = info_.get_piece_info(piece_i); + bool included = false; + TRY_STATUS(iterate_piece(piece, [&](auto it, IterateInfo info) { + if (!it->excluded) { + included = true; + } + return td::Status::OK(); + })); + if (!included) { + return td::Status::Error("Piece is excluded"); + } + std::string data(piece.size, '\0'); + TRY_STATUS(iterate_piece(piece, [&](auto it, IterateInfo info) { + size_t chunk_i = it - chunks_.begin(); + if (!new_blobs[chunk_i]) { + return td::Status::Error("No such file"); + } + TRY_RESULT(s, new_blobs[chunk_i].value().view_copy(td::MutableSlice(data).substr(info.piece_offset, 
info.size), + info.chunk_offset)); + if (s != info.size) { + return td::Status::Error("Can't read file"); + } + return td::Status::OK(); + })); + return data; + }; + std::vector> new_pieces; + for (size_t i = 0; i < piece_is_ready_.size(); ++i) { + if (piece_is_ready_[i]) { + continue; + } + auto r_data = load_new_piece(i); + if (r_data.is_error()) { + continue; + } + td::Bits256 hash; + td::sha256(r_data.ok(), hash.as_slice()); + new_pieces.emplace_back(i, hash); + } + size_t added_cnt = 0; + for (size_t i : merkle_tree_.add_pieces(std::move(new_pieces))) { + auto r_data = load_new_piece(i); + if (r_data.is_error()) { + continue; + } + if (add_piece(i, r_data.ok(), {}).is_ok()) { + ++added_cnt; + } + } + if (added_cnt > 0) { + LOG(INFO) << "Loaded " << added_cnt << " new pieces for " << get_hash().to_hex(); + } +} + } // namespace ton diff --git a/storage/Torrent.h b/storage/Torrent.h index a335c4b4..c776c36e 100644 --- a/storage/Torrent.h +++ b/storage/Torrent.h @@ -25,6 +25,7 @@ #include "td/db/utils/BlobView.h" #include +#include namespace ton { class Torrent { @@ -40,6 +41,7 @@ class Torrent { }; // creation + static td::Result open(Options options, td::Bits256 hash); static td::Result open(Options options, TorrentMeta meta); static td::Result open(Options options, td::Slice meta_str); void validate(); @@ -54,7 +56,8 @@ class Torrent { // add piece (with an optional proof) td::Status add_piece(td::uint64 piece_i, td::Slice data, td::Ref proof); - //TODO: add multiple chunks? Merkle tree supports much more general interface + //TODO: add multiple pieces? 
Merkle tree supports much more general interface + td::Status add_proof(td::Ref proof); bool is_completed() const; @@ -91,19 +94,74 @@ class Torrent { td::CSlice get_file_name(size_t i) const; td::uint64 get_file_size(size_t i) const; td::uint64 get_file_ready_size(size_t i) const; + std::string get_file_path(size_t i) const; struct PartsRange { td::uint64 begin{0}; td::uint64 end{0}; + bool contains(td::uint64 i) const { + return begin <= i && i < end; + } }; PartsRange get_file_parts_range(size_t i); - PartsRange get_header_parts_range(); + PartsRange get_header_parts_range() const; size_t get_ready_parts_count() const; std::vector chunks_by_piece(td::uint64 piece_id); + bool inited_info() const { + return inited_info_; + } + bool inited_header() const { + return (bool)header_; + } + td::Bits256 get_hash() const { + return hash_; + } + std::string get_root_dir() const { + return root_dir_ ? root_dir_.value() : ""; + } + td::Status init_info(Info info); + td::Status set_header(TorrentHeader header); + + void enable_write_to_files(); + void set_file_excluded(size_t i, bool excluded); + bool file_is_excluded(size_t i) const { + return chunks_.at(i).excluded; + } + td::uint64 get_included_size() const { + return header_ ? 
included_size_ : info_.file_size; + } + td::uint64 get_included_ready_size() const { + return included_ready_size_; + } + + bool is_piece_in_memory(td::uint64 i) const { + return in_memory_pieces_.count(i); + } + std::set get_pieces_in_memory() const { + std::set pieces; + for (const auto &p : in_memory_pieces_) { + pieces.insert(p.first); + } + return pieces; + } + + const td::Status &get_fatal_error() const { + return fatal_error_; + } + + const TorrentHeader &get_header() const { + CHECK(inited_header()) + return header_.value(); + } + + void load_from_files(std::string files_path); + private: + td::Bits256 hash_; + bool inited_info_ = false; Info info_; td::optional root_dir_; @@ -113,6 +171,12 @@ class Torrent { size_t not_ready_pending_piece_count_{0}; size_t header_pieces_count_{0}; std::map pending_pieces_; + bool enabled_wirte_to_files_ = false; + struct InMemoryPiece { + std::string data; + std::set pending_chunks; + }; + std::map in_memory_pieces_; // Pieces that overlap excluded files ton::MerkleTree merkle_tree_; @@ -120,12 +184,15 @@ class Torrent { size_t not_ready_piece_count_{0}; size_t ready_parts_count_{0}; + td::Status fatal_error_ = td::Status::OK(); + struct ChunkState { std::string name; td::uint64 offset{0}; td::uint64 size{0}; td::uint64 ready_size{0}; td::BlobView data; + bool excluded{false}; struct Cache { td::uint64 offset{0}; @@ -137,10 +204,11 @@ class Torrent { return ready_size == size; } - TD_WARN_UNUSED_RESULT td::Status add_piece(td::Slice piece, td::uint64 offset) { + TD_WARN_UNUSED_RESULT td::Status write_piece(td::Slice piece, td::uint64 offset) { TRY_RESULT(written, data.write(piece, offset)); - CHECK(written == piece.size()); - ready_size += written; + if (written != piece.size()) { + return td::Status::Error("Written less than expected"); + } return td::Status::OK(); } bool has_piece(td::uint64 offset, td::uint64 size) { @@ -149,21 +217,24 @@ class Torrent { TD_WARN_UNUSED_RESULT td::Status get_piece(td::MutableSlice dest, 
td::uint64 offset, Cache *cache = nullptr); }; std::vector chunks_; + td::uint64 included_size_{0}; + td::uint64 included_ready_size_{0}; - explicit Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunk); - explicit Torrent(TorrentMeta meta); + explicit Torrent(td::Bits256 hash); + explicit Torrent(Info info, td::optional header, ton::MerkleTree tree, std::vector chunk, + std::string root_dir); void set_root_dir(std::string root_dir) { root_dir_ = std::move(root_dir); } - std::string get_chunk_path(td::Slice name); + std::string get_chunk_path(td::Slice name) const; td::Status init_chunk_data(ChunkState &chunk); template td::Status iterate_piece(Info::PieceInfo piece, F &&f); + void add_pending_pieces(); - td::Status add_header_piece(td::uint64 piece_i, td::Slice data); + td::Status add_pending_piece(td::uint64 piece_i, td::Slice data); td::Status add_validated_piece(td::uint64 piece_i, td::Slice data); - void set_header(const TorrentHeader &header); }; } // namespace ton diff --git a/storage/TorrentCreator.cpp b/storage/TorrentCreator.cpp index cdd2e4cf..d9d97df7 100644 --- a/storage/TorrentCreator.cpp +++ b/storage/TorrentCreator.cpp @@ -25,11 +25,20 @@ #include "td/utils/PathView.h" #include "td/utils/port/path.h" #include "td/utils/tl_helpers.h" +#include "MicrochunkTree.h" +#include "TorrentHeader.hpp" namespace ton { td::Result Torrent::Creator::create_from_path(Options options, td::CSlice raw_path) { TRY_RESULT(path, td::realpath(raw_path)); TRY_RESULT(stat, td::stat(path)); + std::string root_dir = path; + while (!root_dir.empty() && root_dir.back() == TD_DIR_SLASH) { + root_dir.pop_back(); + } + while (!root_dir.empty() && root_dir.back() != TD_DIR_SLASH) { + root_dir.pop_back(); + } if (stat.is_dir_) { if (!path.empty() && path.back() != TD_DIR_SLASH) { path += TD_DIR_SLASH; @@ -50,17 +59,21 @@ td::Result Torrent::Creator::create_from_path(Options options, td::CSli }); TRY_STATUS(std::move(status)); 
TRY_STATUS(std::move(walk_status)); + creator.root_dir_ = std::move(root_dir); + std::sort(creator.files_.begin(), creator.files_.end(), + [](const Torrent::Creator::File& a, const Torrent::Creator::File& b) { return a.name < b.name; }); return creator.finalize(); } else { Torrent::Creator creator(options); TRY_STATUS(creator.add_file(td::PathView(path).file_name(), path)); + creator.root_dir_ = std::move(root_dir); return creator.finalize(); } } td::Result Torrent::Creator::create_from_blobs(Options options, td::Span blobs) { Torrent::Creator creator(options); - for (auto &blob : blobs) { + for (auto& blob : blobs) { TRY_STATUS(creator.add_blob(blob.name, blob.data)); } return creator.finalize(); @@ -79,7 +92,7 @@ td::Status Torrent::Creator::add_blob(td::Slice name, td::BlobView blob) { } TD_WARN_UNUSED_RESULT td::Status Torrent::Creator::add_file(td::Slice name, td::CSlice path) { - LOG(INFO) << "Add file " << name << " " << path; + LOG(DEBUG) << "Add file " << name << " " << path; TRY_RESULT(data, td::FileNoCacheBlobView::create(path)); return add_blob(name, std::move(data)); } @@ -115,11 +128,9 @@ td::Result Torrent::Creator::finalize() { auto header_size = header.serialization_size(); auto file_size = header_size + data_offset; - auto chunks_count = (file_size + options_.piece_size - 1) / options_.piece_size; - ton::MerkleTree tree; - tree.init_begin(chunks_count); + auto pieces_count = (file_size + options_.piece_size - 1) / options_.piece_size; std::vector chunks; - size_t chunk_i = 0; + std::vector pieces; auto flush_reader = [&](bool force) { while (true) { auto slice = reader.prepare_read(); @@ -127,16 +138,14 @@ td::Result Torrent::Creator::finalize() { if (slice.empty() || (slice.size() != options_.piece_size && !force)) { break; } - td::UInt256 hash; + td::Bits256 hash; sha256(slice, hash.as_slice()); - CHECK(chunk_i < chunks_count); - tree.init_add_chunk(chunk_i, hash.as_slice()); - chunk_i++; + pieces.push_back(hash); 
reader.confirm_read(slice.size()); } }; td::uint64 offset = 0; - auto add_blob = [&](auto &&data, td::Slice name) { + auto add_blob = [&](auto data, td::Slice name) { td::uint64 data_offset = 0; while (data_offset < data.size()) { auto dest = writer.prepare_write(); @@ -168,24 +177,25 @@ td::Result Torrent::Creator::finalize() { td::sha256(header_str, info.header_hash.as_slice()); add_blob(td::BufferSliceBlobView::create(td::BufferSlice(header_str)), "").ensure(); - for (auto &file : files_) { + for (auto& file : files_) { add_blob(std::move(file.data), file.name).ensure(); } flush_reader(true); - tree.init_finish(); - CHECK(chunk_i == chunks_count); + CHECK(pieces.size() == pieces_count); CHECK(offset == file_size); + MerkleTree tree(std::move(pieces)); info.header_size = header.serialization_size(); info.piece_size = options_.piece_size; info.description = options_.description; info.file_size = file_size; - info.depth = tree.get_depth(); info.root_hash = tree.get_root_hash(); info.init_cell(); + TRY_STATUS_PREFIX(info.validate(), "Invalid torrent info: "); + TRY_STATUS_PREFIX(header.validate(info.file_size, info.header_size), "Invalid torrent header: "); - Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks)); + Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks), root_dir_); return std::move(torrent); } diff --git a/storage/TorrentCreator.h b/storage/TorrentCreator.h index 5d8d7a1c..b54157c1 100644 --- a/storage/TorrentCreator.h +++ b/storage/TorrentCreator.h @@ -55,13 +55,12 @@ class Torrent::Creator { td::Result finalize(); private: - td::Status init(); - Options options_; struct File { std::string name; td::BlobView data; }; std::vector files_; + std::string root_dir_; }; } // namespace ton diff --git a/storage/TorrentHeader.cpp b/storage/TorrentHeader.cpp index dc3ae265..2d7867e0 100644 --- a/storage/TorrentHeader.cpp +++ b/storage/TorrentHeader.cpp @@ -66,4 +66,76 @@ td::Slice TorrentHeader::get_name(td::uint64 
file_i) const { return td::Slice(names).substr(from, till - from); } +static td::Status validate_name(td::Slice name, bool is_dir_name = false) { + if (name.empty()) { + return td::Status::Error("Name can't be empty"); + } + if (name[0] == '/') { + return td::Status::Error("Name can't start with '/'"); + } + if (name.back() == '/' && !is_dir_name) { + return td::Status::Error("Name can't end with '/'"); + } + for (size_t l = 0; l < name.size();) { + size_t r = l + 1; + while (r < name.size() && name[r] != '/') { + ++r; + } + td::Slice s = name.substr(l, r - l); + if (s == "") { + return td::Status::Error("Name can't contain consequitive '/'"); + } + if (s == ".") { + return td::Status::Error("Name can't contain component \".\""); + } + if (s == "..") { + return td::Status::Error("Name can't contain component \"..\""); + } + l = r + 1; + } + return td::Status::OK(); +} + +td::Status TorrentHeader::validate(td::uint64 total_size, td::uint64 header_size) const { + if (serialization_size() != header_size) { + return td::Status::Error("Invalid size"); + } + for (size_t i = 0; i + 1 < files_count; ++i) { + if (name_index[i] > name_index[i + 1]) { + return td::Status::Error("Invalid name offset"); + } + } + if (name_index.back() != names.size()) { + return td::Status::Error("Invalid name offset"); + } + for (size_t i = 0; i < files_count; ++i) { + if (get_data_offset(i) > get_data_offset(i + 1)) { + return td::Status::Error("Invalid data offset"); + } + } + if (get_data_offset(files_count) != total_size) { + return td::Status::Error("Invalid data offset"); + } + + std::set names; + for (size_t i = 0; i < files_count; ++i) { + auto name = get_name(i); + TRY_STATUS_PREFIX(validate_name(name), PSTRING() << "Invalid filename " << name << ": "); + if (!names.insert(name.str()).second) { + return td::Status::Error(PSTRING() << "Duplicate filename " << name); + } + } + if (!dir_name.empty()) { + TRY_STATUS_PREFIX(validate_name(dir_name, true), "Invalid dir_name: "); + } + for 
(const std::string& name : names) { + std::string name1 = name + '/'; + auto it = names.lower_bound(name1); + if (it != names.end() && it->substr(0, name1.size()) == name1) { + return td::Status::Error(PSTRING() << "Filename " << name << " coincides with directory name"); + } + } + return td::Status::OK(); +} + } // namespace ton diff --git a/storage/TorrentHeader.h b/storage/TorrentHeader.h index 6a13855d..65ea859f 100644 --- a/storage/TorrentHeader.h +++ b/storage/TorrentHeader.h @@ -21,6 +21,7 @@ #include "td/utils/Slice.h" #include "td/utils/buffer.h" +#include "td/utils/Status.h" namespace ton { // fec_info_none#c82a1964 = FecInfo; @@ -37,6 +38,13 @@ namespace ton { // names:(file_names_size * [uint8]) // data:(tot_data_size * [uint8]) // = TorrentHeader; +// +// Filename rules: +// 1) Name can't be empty +// 2) Names in a torrent should be unique +// 3) Name can't start or end with '/' or contain two consequitive '/' +// 4) Components of name can't be equal to "." or ".." +// 5) If there's a name aaa/bbb/ccc, no other name can start with aaa/bbb/ccc/ struct TorrentHeader { td::uint32 files_count{0}; @@ -64,5 +72,7 @@ struct TorrentHeader { void store(StorerT &storer) const; template void parse(ParserT &parser); + + td::Status validate(td::uint64 total_size, td::uint64 header_size) const; }; } // namespace ton diff --git a/storage/TorrentHeader.hpp b/storage/TorrentHeader.hpp index 2b84ccb0..d8f8f719 100644 --- a/storage/TorrentHeader.hpp +++ b/storage/TorrentHeader.hpp @@ -53,7 +53,7 @@ void TorrentHeader::parse(ParserT &parser) { td::uint32 got_type; parse(got_type, parser); if (got_type != type) { - parser.set_error("Unknown fec type"); + parser.set_error("Unknown type"); return; } parse(files_count, parser); diff --git a/storage/TorrentInfo.cpp b/storage/TorrentInfo.cpp index 46c1cf69..48b0d592 100644 --- a/storage/TorrentInfo.cpp +++ b/storage/TorrentInfo.cpp @@ -26,19 +26,18 @@ namespace ton { bool TorrentInfo::pack(vm::CellBuilder &cb) const { - return 
cb.store_long_bool(depth, 32) && cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) && - cb.store_bits_bool(root_hash) && cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) && + return cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) && cb.store_bits_bool(root_hash) && + cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) && vm::CellText::store(cb, description).is_ok(); } bool TorrentInfo::unpack(vm::CellSlice &cs) { - return cs.fetch_uint_to(32, depth) && cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) && - cs.fetch_bits_to(root_hash) && cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) && - vm::CellText::fetch_to(cs, description); + return cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) && cs.fetch_bits_to(root_hash) && + cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) && vm::CellText::fetch_to(cs, description); } -vm::Cell::Hash TorrentInfo::get_hash() const { - return as_cell()->get_hash(); +td::Bits256 TorrentInfo::get_hash() const { + return as_cell()->get_hash().bits(); } void TorrentInfo::init_cell() { @@ -48,7 +47,7 @@ void TorrentInfo::init_cell() { } td::Ref TorrentInfo::as_cell() const { - CHECK(cell_.not_null()); + CHECK(cell_.not_null()) return cell_; } @@ -63,4 +62,17 @@ TorrentInfo::PieceInfo TorrentInfo::get_piece_info(td::uint64 piece_i) const { info.size = td::min(static_cast(piece_size), file_size - info.offset); return info; } + +td::Status TorrentInfo::validate() const { + if (piece_size == 0) { + return td::Status::Error("Piece size is 0"); + } + if (header_size > file_size) { + return td::Status::Error("Header is too big"); + } + if (description.size() > 1024) { + return td::Status::Error("Description is too long"); + } + return td::Status::OK(); +} } // namespace ton diff --git a/storage/TorrentInfo.h b/storage/TorrentInfo.h index fd4778fa..2aee4858 100644 --- a/storage/TorrentInfo.h 
+++ b/storage/TorrentInfo.h @@ -23,11 +23,12 @@ #include "td/utils/UInt.h" #include "vm/cells.h" +#include "td/utils/optional.h" namespace ton { -// torrent_info depth:# piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) description:Text = TorrentInfo; +// torrent_info piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) +// description:Text = TorrentInfo; struct TorrentInfo { - td::uint32 depth{0}; td::uint32 piece_size{768 * 128}; td::uint64 file_size{0}; td::Bits256 root_hash; @@ -39,7 +40,7 @@ struct TorrentInfo { bool unpack(vm::CellSlice &cs); void init_cell(); - vm::Cell::Hash get_hash() const; + td::Bits256 get_hash() const; td::Ref as_cell() const; struct PieceInfo { @@ -50,6 +51,8 @@ struct TorrentInfo { td::uint64 pieces_count() const; PieceInfo get_piece_info(td::uint64 piece_i) const; + td::Status validate() const; + private: td::Ref cell_; }; diff --git a/storage/TorrentMeta.cpp b/storage/TorrentMeta.cpp index 0e677715..1a8fdd89 100644 --- a/storage/TorrentMeta.cpp +++ b/storage/TorrentMeta.cpp @@ -40,8 +40,6 @@ td::Result TorrentMeta::deserialize(td::Slice data) { if (header_hash != res.info.header_hash) { return td::Status::Error("Header hash mismatch"); } - } else { - LOG(ERROR) << "NO HEADER"; } if (res.root_proof.not_null()) { auto root = vm::MerkleProof::virtualize(res.root_proof, 1); diff --git a/storage/db.h b/storage/db.h new file mode 100644 index 00000000..269be0d9 --- /dev/null +++ b/storage/db.h @@ -0,0 +1,57 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. 
+ + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#pragma once +#include "td/db/KeyValueAsync.h" +#include "tl-utils/common-utils.hpp" + +namespace db { + +using DbType = td::KeyValueAsync; + +template +inline void db_get(DbType& db, td::Bits256 key, bool allow_not_found, + td::Promise> promise) { + db.get(key, [allow_not_found, promise = std::move(promise)](td::Result R) mutable { + TRY_RESULT_PROMISE(promise, r, std::move(R)); + if (r.status == td::KeyValueReader::GetStatus::NotFound) { + if (allow_not_found) { + promise.set_value(nullptr); + } else { + promise.set_error(td::Status::Error("Key not found")); + } + return; + } + promise.set_result(ton::fetch_tl_object(r.value, true)); + }); +} + +template +inline td::Result> db_get(td::KeyValue& db, td::Bits256 key, bool allow_not_found) { + std::string value; + TRY_RESULT(r, db.get(key.as_slice(), value)); + if (r == td::KeyValue::GetStatus::NotFound) { + if (allow_not_found) { + return nullptr; + } else { + return td::Status::Error("Key not found"); + } + } + return ton::fetch_tl_object(td::Slice(value), true); +} + +} // namespace db \ No newline at end of file diff --git a/storage/storage-cli.cpp b/storage/storage-cli.cpp index 06cb0055..43ddbeaf 100644 --- a/storage/storage-cli.cpp +++ b/storage/storage-cli.cpp @@ -23,8 +23,6 @@ #include "dht/dht.h" #include "keys/encryptor.h" #include "overlay/overlay.h" -#include "rldp/rldp.h" -#include "rldp2/rldp.h" #include "td/utils/JsonBuilder.h" #include "td/utils/port/signals.h" @@ -37,13 +35,12 @@ #include "td/utils/filesystem.h" #include "td/utils/port/path.h" -#include "td/actor/actor.h" #include "td/actor/MultiPromise.h" 
#include "terminal/terminal.h" #include "Torrent.h" #include "TorrentCreator.h" -#include "NodeActor.h" +#include "PeerManager.h" #include "auto/tl/ton_api_json.h" @@ -53,8 +50,6 @@ #include #include "git.h" -namespace ton_rldp = ton::rldp2; - struct StorageCliOptions { std::string config; bool enable_readline{true}; @@ -67,199 +62,6 @@ struct StorageCliOptions { using AdnlCategory = td::int32; -class PeerManager : public td::actor::Actor { - public: - PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id, - td::actor::ActorId overlays, td::actor::ActorId adnl, - td::actor::ActorId rldp) - : adnl_id_(std::move(adnl_id)) - , overlay_id_(std::move(overlay_id)) - , overlays_(std::move(overlays)) - , adnl_(std::move(adnl)) - , rldp_(std::move(rldp)) { - CHECK(register_adnl_id(adnl_id_) == 1); - } - void start_up() override { - // TODO: forbid broadcasts? - auto rules = ton::overlay::OverlayPrivacyRules{ton::overlay::Overlays::max_fec_broadcast_size()}; - class Callback : public ton::overlay::Overlays::Callback { - public: - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, - td::BufferSlice data) override { - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, td::BufferSlice data, - td::Promise promise) override { - } - void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id, - td::BufferSlice data) override { - } - }; - send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay, adnl_id_, overlay_id_.clone(), - std::make_unique(), rules, "{ \"type\": \"storage\" }"); - } - void tear_down() override { - send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id_, overlay_id_.compute_short_id()); - } - void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise promise) { - TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); - TRY_RESULT_PROMISE(promise, dst_id, 
peer_to_andl(dst)); - query = ton::create_serialize_tl_object_suffix( - std::move(query), overlay_id_.compute_short_id().bits256_value()); - send_closure(rldp_, &ton_rldp::Rldp::send_query_ex, src_id, dst_id, "", std::move(promise), td::Timestamp::in(10), - std::move(query), 1 << 25); - } - - void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) { - data = data.from_slice(data.as_slice().substr(4 + 32)); - auto src_id = register_adnl_id(src); - auto dst_id = register_adnl_id(dst); - auto it = peers_.find(std::make_pair(dst_id, src_id)); - if (it == peers_.end()) { - auto node_it = nodes_.find(dst_id); - if (node_it == nodes_.end()) { - LOG(ERROR) << "Unknown query destination"; - promise.set_error(td::Status::Error("Unknown query destination")); - return; - } - if (!node_it->second.is_alive()) { - LOG(ERROR) << "Expired query destination"; - promise.set_error(td::Status::Error("Unknown query destination")); - return; - } - send_closure(node_it->second, &ton::NodeActor::start_peer, src_id, - [promise = std::move(promise), - data = std::move(data)](td::Result> r_peer) mutable { - TRY_RESULT_PROMISE(promise, peer, std::move(r_peer)); - send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); - }); - return; - } - send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise)); - } - - void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { - peers_[std::make_pair(src, dst)] = std::move(peer); - register_src(src, [](td::Result res) { res.ensure(); }); - } - - void register_node(ton::PeerId src, td::actor::ActorId node) { - nodes_[src] = std::move(node); - register_src(src, [](td::Result res) { res.ensure(); }); - } - - void unregister_node(ton::PeerId src, td::actor::ActorId node) { - auto it = nodes_.find(src); - CHECK(it != nodes_.end()); - if (it->second == node) { - nodes_.erase(it); - } - 
unregister_src(src, [](td::Result res) { res.ensure(); }); - } - - void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId peer) { - auto it = peers_.find(std::make_pair(src, dst)); - CHECK(it != peers_.end()); - if (it->second == peer) { - peers_.erase(it); - } - unregister_src(src, [](td::Result res) { res.ensure(); }); - } - - void unregister_src(ton::PeerId src, td::Promise promise) { - TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); - if (--subscribed_peers_[src] == 0) { - LOG(ERROR) << "Unsubscribe " << src_id; - subscribed_peers_.erase(src); - send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, src_id, - ton::create_serialize_tl_object( - overlay_id_.compute_short_id().bits256_value()) - .as_slice() - .str()); - } - promise.set_value({}); - } - void register_src(ton::PeerId src, td::Promise promise) { - TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src)); - class Callback : public ton::adnl::Adnl::Callback { - public: - Callback(td::actor::ActorId peer_manager) : peer_manager_(std::move(peer_manager)) { - } - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, - td::BufferSlice data) override { - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) override { - send_closure(peer_manager_, &PeerManager::execute_query, std::move(src), std::move(dst), std::move(data), - std::move(promise)); - } - - private: - td::actor::ActorId peer_manager_; - }; - - if (subscribed_peers_[src]++ == 0) { - LOG(ERROR) << "Subscribe " << src_id; - send_closure(adnl_, &ton::adnl::Adnl::subscribe, src_id, - ton::create_serialize_tl_object( - overlay_id_.compute_short_id().bits256_value()) - .as_slice() - .str(), - std::make_unique(actor_id(this))); - } - promise.set_value({}); - } - - td::Result peer_to_andl(ton::PeerId id) { - if (id <= 0 || id > adnl_ids_.size()) { - return td::Status::Error(PSLICE() << "Invalid peer id " << id); - } - return 
adnl_ids_[id - 1]; - } - - ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) { - auto it = adnl_to_peer_id_.emplace(id, next_peer_id_); - if (it.second) { - LOG(ERROR) << "Register AndlId " << id << " -> " << it.first->second; - adnl_ids_.push_back(id); - next_peer_id_++; - } - return it.first->second; - } - - void get_peers(td::Promise> promise) { - send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, adnl_id_, overlay_id_.compute_short_id(), - 30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers)); - } - - private: - ton::adnl::AdnlNodeIdShort adnl_id_; - ton::overlay::OverlayIdFull overlay_id_; - td::actor::ActorId overlays_; - td::actor::ActorId adnl_; - td::actor::ActorId rldp_; - - std::map, td::actor::ActorId> peers_; - std::map> nodes_; - ton::PeerId next_peer_id_{1}; - std::map adnl_to_peer_id_; - std::vector adnl_ids_; - - std::map subscribed_peers_; - - void got_overlay_random_peers(td::Result> r_peers, - td::Promise> promise) { - TRY_RESULT_PROMISE(promise, peers, std::move(r_peers)); - - std::vector res; - for (auto peer : peers) { - res.push_back(register_adnl_id(peer)); - } - - promise.set_value(std::move(res)); - } -}; - class StorageCli : public td::actor::Actor { public: explicit StorageCli(StorageCliOptions options) : options_(std::move(options)) { @@ -338,7 +140,8 @@ class StorageCli : public td::actor::Actor { td::mkdir(options_.db_root).ignore(); keyring_ = ton::keyring::Keyring::create(options_.db_root + "/keyring"); - adnl_network_manager_ = ton::adnl::AdnlNetworkManager::create(td::narrow_cast(options_.addr.get_port())); + adnl_network_manager_ = + ton::adnl::AdnlNetworkManager::create(td::narrow_cast(options_.addr.get_port())); adnl_ = ton::adnl::Adnl::create(options_.db_root, keyring_.get()); td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, adnl_network_manager_.get()); rldp_ = ton_rldp::Rldp::create(adnl_.get()); @@ -430,6 +233,7 @@ class StorageCli : 
public td::actor::Actor { td::TerminalIO::out() << "create \tCreate torrent from a directory\n"; td::TerminalIO::out() << "info \tPrint info about loaded torrent\n"; td::TerminalIO::out() << "load \tLoad torrent file in memory\n"; + td::TerminalIO::out() << "addhash \tAdd torrent by hash (in hex)\n"; td::TerminalIO::out() << "save \tSave torrent file\n"; td::TerminalIO::out() << "start \tStart torrent downloading/uploading\n"; td::TerminalIO::out() << "seed \tStart torrent uploading\n"; @@ -451,6 +255,8 @@ class StorageCli : public td::actor::Actor { torrent_info(parser.read_all(), std::move(cmd_promise)); } else if (cmd == "load") { cmd_promise.set_result(torrent_load(parser.read_all()).move_map([](auto &&x) { return td::Unit(); })); + } else if (cmd == "addhash") { + cmd_promise.set_result(torrent_add_by_hash(parser.read_all()).move_map([](auto &&x) { return td::Unit(); })); } else if (cmd == "save") { auto id = parser.read_word(); parser.skip_whitespaces(); @@ -544,7 +350,7 @@ class StorageCli : public td::actor::Actor { ton::Torrent::Creator::Options options; options.piece_size = 128 * 1024; TRY_RESULT_PROMISE(promise, torrent, ton::Torrent::Creator::create_from_path(options, path)); - auto hash = torrent.get_info().header_hash; + auto hash = torrent.get_hash(); for (auto &it : infos_) { if (it.second.hash == hash) { promise.set_error(td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")")); @@ -552,6 +358,7 @@ class StorageCli : public td::actor::Actor { } } td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n"; + td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n"; infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn(), td::actor::ActorOwn()}); torrent_id_++; @@ -600,12 +407,12 @@ class StorageCli : public td::actor::Actor { } } - td::actor::ActorOwn create_peer_manager(vm::Cell::Hash hash) { + td::actor::ActorOwn create_peer_manager(td::Bits256 hash) { // 
create overlay network td::BufferSlice hash_str(hash.as_slice()); ton::overlay::OverlayIdFull overlay_id(std::move(hash_str)); auto adnl_id = ton::adnl::AdnlNodeIdShort{public_key_.compute_short_id()}; - return td::actor::create_actor("PeerManager", adnl_id, std::move(overlay_id), overlays_.get(), + return td::actor::create_actor("PeerManager", adnl_id, std::move(overlay_id), false, overlays_.get(), adnl_.get(), rldp_.get()); } @@ -616,68 +423,17 @@ class StorageCli : public td::actor::Actor { return; } if (ptr->peer_manager.empty()) { - ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_info().get_hash()); + ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_hash()); } ton::PeerId self_id = 1; - class Context : public ton::NodeActor::Callback { + class Callback : public ton::NodeActor::Callback { public: - Context(td::actor::ActorId peer_manager, td::actor::ActorId storage_cli, - ton::PeerId self_id, td::uint32 torrent_id, td::Promise on_completed) - : peer_manager_(peer_manager) - , storage_cli_(std::move(storage_cli)) - , self_id_(self_id) + Callback(td::actor::ActorId storage_cli, td::uint32 torrent_id, td::Promise on_completed) + : storage_cli_(std::move(storage_cli)) , torrent_id_(std::move(torrent_id)) , on_completed_(std::move(on_completed)) { } - void get_peers(td::Promise> promise) override { - send_closure(peer_manager_, &PeerManager::get_peers, std::move(promise)); - } - void register_self(td::actor::ActorId self) override { - CHECK(self_.empty()); - self_ = self; - send_closure(peer_manager_, &PeerManager::register_node, self_id_, self_); - } - ~Context() { - if (!self_.empty()) { - send_closure(peer_manager_, &PeerManager::unregister_node, self_id_, self_); - } - } - td::actor::ActorOwn create_peer(ton::PeerId self_id, ton::PeerId peer_id, - td::SharedState state) override { - CHECK(self_id == self_id_); - class PeerCallback : public ton::PeerActor::Callback { - public: - PeerCallback(ton::PeerId self_id, ton::PeerId 
peer_id, td::actor::ActorId peer_manager) - : self_id_(self_id), peer_id_(peer_id), peer_manager_(std::move(peer_manager)) { - } - void register_self(td::actor::ActorId self) override { - CHECK(self_.empty()); - self_ = std::move(self); - send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_); - } - void send_query(td::uint64 query_id, td::BufferSlice query) override { - send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query), - promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id)); - } - - ~PeerCallback() { - if (!self_.empty()) { - send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_); - } - } - - private: - td::actor::ActorId self_; - ton::PeerId self_id_; - ton::PeerId peer_id_; - td::actor::ActorId peer_manager_; - }; - - return td::actor::create_actor(PSLICE() << "ton::PeerActor " << self_id << "->" << peer_id, - td::make_unique(self_id, peer_id, peer_manager_), - std::move(state)); - } void on_completed() override { if (on_completed_) { @@ -691,23 +447,20 @@ class StorageCli : public td::actor::Actor { } private: - td::actor::ActorId peer_manager_; td::actor::ActorId storage_cli_; - ton::PeerId self_id_; td::uint32 torrent_id_; - std::vector peers_; td::Promise on_completed_; - td::actor::ActorId self_; }; td::Promise on_completed; if (wait_download) { on_completed = std::move(promise); } - auto context = - td::make_unique(ptr->peer_manager.get(), actor_id(this), self_id, ptr->id, std::move(on_completed)); - ptr->node = td::actor::create_actor(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(), - std::move(context), should_download); + auto callback = td::make_unique(actor_id(this), ptr->id, std::move(on_completed)); + auto context = PeerManager::create_callback(ptr->peer_manager.get()); + ptr->node = + td::actor::create_actor(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(), + std::move(callback), std::move(context), 
nullptr, should_download); td::TerminalIO::out() << "Torrent #" << ptr->id << " started\n"; promise.release().release(); if (promise) { @@ -748,8 +501,11 @@ class StorageCli : public td::actor::Actor { return; } auto file_id_str = parser.read_word(); - size_t file_id = std::numeric_limits::max(); - if (file_id_str != "*") { + size_t file_id = 0; + bool all = false; + if (file_id_str == "*") { + all = true; + } else { TRY_RESULT_PROMISE_ASSIGN(promise, file_id, td::to_integer_safe(file_id_str)); } TRY_RESULT_PROMISE(promise, priority, td::to_integer_safe(parser.read_word())); @@ -757,7 +513,13 @@ class StorageCli : public td::actor::Actor { promise.set_error(td::Status::Error("Priority = 255 is reserved")); return; } - send_closure(ptr->node, &ton::NodeActor::set_file_priority, file_id, priority); + if (all) { + send_closure(ptr->node, &ton::NodeActor::set_all_files_priority, priority, + promise.wrap([](bool) { return td::Unit(); })); + } else { + send_closure(ptr->node, &ton::NodeActor::set_file_priority_by_idx, file_id, priority, + promise.wrap([](bool) { return td::Unit(); })); + } promise.set_value(td::Unit()); } @@ -779,13 +541,40 @@ class StorageCli : public td::actor::Actor { TRY_RESULT(torrent, ton::Torrent::open(options, data)); - auto hash = torrent.get_info().header_hash; + auto hash = torrent.get_hash(); for (auto &it : infos_) { if (it.second.hash == hash) { return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")"); } } td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n"; + td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n"; + auto res = + infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn(), + td::actor::ActorOwn()}); + torrent_id_++; + return &res.first->second; + } + + td::Result torrent_add_by_hash(td::Slice hash_hex) { + td::Bits256 hash; + if (hash.from_hex(hash_hex) != 256) { + return td::Status::Error("Failed to parse torrent hash"); 
+ } + ton::Torrent::Options options; + options.in_memory = false; + options.root_dir = "."; + options.validate = false; + + TRY_RESULT(torrent, ton::Torrent::open(options, hash)); + + for (auto &it : infos_) { + if (it.second.hash == hash) { + return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")"); + } + } + td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n"; + td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n"; auto res = infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn(), td::actor::ActorOwn()}); @@ -839,7 +628,8 @@ int main(int argc, char *argv[]) { return (verbosity >= 0 && verbosity <= 20) ? td::Status::OK() : td::Status::Error("verbosity must be 0..20"); }); p.add_option('V', "version", "shows storage-cli build information", [&]() { - std::cout << "storage-cli build information: [ Commit: " << GitMetadata::CommitSHA1() << ", Date: " << GitMetadata::CommitDate() << "]\n"; + std::cout << "storage-cli build information: [ Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate() << "]\n"; std::exit(0); }); p.add_option('C', "config", "set ton config", [&](td::Slice arg) { options.config = arg.str(); }); @@ -859,7 +649,7 @@ int main(int argc, char *argv[]) { std::_Exit(2); } - td::actor::Scheduler scheduler({0}); + td::actor::Scheduler scheduler({3}); scheduler.run_in_context([&] { td::actor::create_actor("console", options).release(); }); scheduler.run(); return 0; diff --git a/storage/storage-daemon/CMakeLists.txt b/storage/storage-daemon/CMakeLists.txt new file mode 100644 index 00000000..4880eece --- /dev/null +++ b/storage/storage-daemon/CMakeLists.txt @@ -0,0 +1,32 @@ +cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) + +add_executable(embed-provider-code smartcont/embed-provider-code.cpp) + +add_custom_command( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR} + COMMAND embed-provider-code 
smartcont/storage-provider-code.boc smartcont/provider-code.h + COMMENT "Generate provider-code.h" + DEPENDS embed-provider-code smartcont/storage-provider-code.boc + OUTPUT smartcont/provider-code.h +) + +set(STORAGE_DAEMON_SOURCE + storage-daemon.cpp + StorageManager.h + StorageManager.cpp + StorageProvider.h + StorageProvider.cpp + smc-util.h + smc-util.cpp + smartcont/provider-code.h) + +set(STORAGE_DAEMON_CLI_SOURCE + storage-daemon-cli.cpp + ) + +add_executable(storage-daemon ${STORAGE_DAEMON_SOURCE}) +target_link_libraries(storage-daemon storage overlay tdutils tdactor adnl tl_api dht + rldp rldp2 fift-lib memprof git tonlib) + +add_executable(storage-daemon-cli ${STORAGE_DAEMON_CLI_SOURCE}) +target_link_libraries(storage-daemon-cli tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_block terminal git) \ No newline at end of file diff --git a/storage/storage-daemon/StorageManager.cpp b/storage/storage-daemon/StorageManager.cpp new file mode 100644 index 00000000..a62560cf --- /dev/null +++ b/storage/storage-daemon/StorageManager.cpp @@ -0,0 +1,268 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#include "StorageManager.h" +#include "td/utils/filesystem.h" +#include "td/utils/port/path.h" +#include "td/db/RocksDb.h" +#include "td/actor/MultiPromise.h" + +static overlay::OverlayIdFull get_overlay_id(td::Bits256 hash) { + td::BufferSlice hash_str(hash.as_slice()); + return overlay::OverlayIdFull(std::move(hash_str)); +} + +StorageManager::StorageManager(adnl::AdnlNodeIdShort local_id, std::string db_root, td::unique_ptr callback, + bool client_mode, td::actor::ActorId adnl, + td::actor::ActorId rldp, td::actor::ActorId overlays) + : local_id_(local_id) + , db_root_(std::move(db_root)) + , callback_(std::move(callback)) + , client_mode_(client_mode) + , adnl_(std::move(adnl)) + , rldp_(std::move(rldp)) + , overlays_(std::move(overlays)) { +} + +void StorageManager::start_up() { + CHECK(db_root_ != ""); + td::mkdir(db_root_).ensure(); + db_root_ = td::realpath(db_root_).move_as_ok(); + td::mkdir(db_root_ + "/torrent-db").ensure(); + td::mkdir(db_root_ + "/torrent-files").ensure(); + LOG(INFO) << "Starting Storage manager. 
DB = " << db_root_; + + db_ = std::make_shared( + std::make_shared(td::RocksDb::open(db_root_ + "/torrent-db").move_as_ok())); + db::db_get( + *db_, create_hash_tl_object(), true, + [SelfId = actor_id(this)](td::Result> R) { + std::vector torrents; + if (R.is_error()) { + LOG(ERROR) << "Failed to load torrent list from db: " << R.move_as_error(); + } else { + auto r = R.move_as_ok(); + if (r != nullptr) { + torrents = std::move(r->torrents_); + } + } + td::actor::send_closure(SelfId, &StorageManager::load_torrents_from_db, std::move(torrents)); + }); +} + +void StorageManager::load_torrents_from_db(std::vector torrents) { + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise([SelfId = actor_id(this)](td::Result R) { + td::actor::send_closure(SelfId, &StorageManager::after_load_torrents_from_db); + }); + for (auto hash : torrents) { + CHECK(!torrents_.count(hash)) + auto& entry = torrents_[hash]; + entry.peer_manager = td::actor::create_actor("PeerManager", local_id_, get_overlay_id(hash), + client_mode_, overlays_, adnl_, rldp_); + NodeActor::load_from_db( + db_, hash, create_callback(hash, entry.closing_state), PeerManager::create_callback(entry.peer_manager.get()), + [SelfId = actor_id(this), hash, + promise = ig.get_promise()](td::Result> R) mutable { + td::actor::send_closure(SelfId, &StorageManager::loaded_torrent_from_db, hash, std::move(R)); + promise.set_result(td::Unit()); + }); + } +} + +void StorageManager::loaded_torrent_from_db(td::Bits256 hash, td::Result> R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to load torrent " << hash.to_hex() << " from db: " << R.move_as_error(); + torrents_.erase(hash); + } else { + auto it = torrents_.find(hash); + CHECK(it != torrents_.end()); + it->second.actor = R.move_as_ok(); + LOG(INFO) << "Loaded torrent " << hash.to_hex() << " from db"; + } +} + +void StorageManager::after_load_torrents_from_db() { + LOG(INFO) << "Finished loading torrents from db (" << torrents_.size() << " torrents)"; + 
db_store_torrent_list(); + callback_->on_ready(); +} + +td::unique_ptr StorageManager::create_callback( + td::Bits256 hash, std::shared_ptr closing_state) { + class Callback : public NodeActor::Callback { + public: + Callback(td::actor::ActorId id, td::Bits256 hash, + std::shared_ptr closing_state) + : id_(std::move(id)), hash_(hash), closing_state_(std::move(closing_state)) { + } + void on_completed() override { + } + void on_closed(Torrent torrent) override { + CHECK(torrent.get_hash() == hash_); + td::actor::send_closure(id_, &StorageManager::on_torrent_closed, std::move(torrent), closing_state_); + } + + private: + td::actor::ActorId id_; + td::Bits256 hash_; + std::shared_ptr closing_state_; + }; + return td::make_unique(actor_id(this), hash, std::move(closing_state)); +} + +void StorageManager::add_torrent(Torrent torrent, bool start_download, td::Promise promise) { + TRY_STATUS_PROMISE(promise, add_torrent_impl(std::move(torrent), start_download)); + db_store_torrent_list(); + promise.set_result(td::Unit()); +} + +td::Status StorageManager::add_torrent_impl(Torrent torrent, bool start_download) { + td::Bits256 hash = torrent.get_hash(); + if (torrents_.count(hash)) { + return td::Status::Error("Cannot add torrent: duplicate hash"); + } + TorrentEntry& entry = torrents_[hash]; + entry.hash = hash; + entry.peer_manager = td::actor::create_actor("PeerManager", local_id_, get_overlay_id(hash), + client_mode_, overlays_, adnl_, rldp_); + auto context = PeerManager::create_callback(entry.peer_manager.get()); + LOG(INFO) << "Added torrent " << hash.to_hex() << " , root_dir = " << torrent.get_root_dir(); + entry.actor = + td::actor::create_actor("Node", 1, std::move(torrent), create_callback(hash, entry.closing_state), + std::move(context), db_, start_download); + return td::Status::OK(); +} + +void StorageManager::add_torrent_by_meta(TorrentMeta meta, std::string root_dir, bool start_download, + td::Promise promise) { + td::Bits256 hash(meta.info.get_hash()); + 
Torrent::Options options; + options.root_dir = root_dir.empty() ? db_root_ + "/torrent-files/" + hash.to_hex() : root_dir; + TRY_RESULT_PROMISE(promise, torrent, Torrent::open(std::move(options), std::move(meta))); + add_torrent(std::move(torrent), start_download, std::move(promise)); +} + +void StorageManager::add_torrent_by_hash(td::Bits256 hash, std::string root_dir, bool start_download, + td::Promise promise) { + Torrent::Options options; + options.root_dir = root_dir.empty() ? db_root_ + "/torrent-files/" + hash.to_hex() : root_dir; + TRY_RESULT_PROMISE(promise, torrent, Torrent::open(std::move(options), hash)); + add_torrent(std::move(torrent), start_download, std::move(promise)); +} + +void StorageManager::set_active_download(td::Bits256 hash, bool active, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_should_download, active); + promise.set_result(td::Unit()); +} + +void StorageManager::with_torrent(td::Bits256 hash, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::with_torrent, std::move(promise)); +} + +void StorageManager::get_all_torrents(td::Promise> promise) { + std::vector result; + for (const auto& p : torrents_) { + result.push_back(p.first); + } + promise.set_result(std::move(result)); +} + +void StorageManager::db_store_torrent_list() { + std::vector torrents; + for (const auto& p : torrents_) { + torrents.push_back(p.first); + } + db_->set(create_hash_tl_object(), + create_serialize_tl_object(std::move(torrents)), + [](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to save torrent list to db: " << R.move_as_error(); + } + }); +} + +void StorageManager::set_all_files_priority(td::Bits256 hash, td::uint8 priority, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, 
&NodeActor::set_all_files_priority, priority, std::move(promise)); +} + +void StorageManager::set_file_priority_by_idx(td::Bits256 hash, size_t idx, td::uint8 priority, + td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_file_priority_by_idx, idx, priority, std::move(promise)); +} + +void StorageManager::set_file_priority_by_name(td::Bits256 hash, std::string name, td::uint8 priority, + td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::set_file_priority_by_name, std::move(name), priority, + std::move(promise)); +} + +void StorageManager::remove_torrent(td::Bits256 hash, bool remove_files, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + LOG(INFO) << "Removing torrent " << hash.to_hex(); + entry->closing_state->removing = true; + entry->closing_state->remove_files = remove_files; + entry->closing_state->promise = std::move(promise); + torrents_.erase(hash); + db_store_torrent_list(); +} + +void StorageManager::load_from(td::Bits256 hash, td::optional meta, std::string files_path, + td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::load_from, std::move(meta), std::move(files_path), + std::move(promise)); +} + +void StorageManager::on_torrent_closed(Torrent torrent, std::shared_ptr closing_state) { + if (!closing_state->removing) { + return; + } + if (closing_state->remove_files && torrent.inited_header()) { + size_t files_count = torrent.get_files_count().unwrap(); + for (size_t i = 0; i < files_count; ++i) { + std::string path = torrent.get_file_path(i); + td::unlink(path).ignore(); + // TODO: Check errors, remove empty directories + } + } + td::rmrf(db_root_ + "/torrent-files/" + torrent.get_hash().to_hex()).ignore(); + NodeActor::cleanup_db(db_, torrent.get_hash(), + [promise = 
std::move(closing_state->promise)](td::Result R) mutable { + if (R.is_error()) { + LOG(ERROR) << "Failed to cleanup database: " << R.move_as_error(); + } + promise.set_result(td::Unit()); + }); +} + +void StorageManager::wait_for_completion(td::Bits256 hash, td::Promise promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::wait_for_completion, std::move(promise)); +} + +void StorageManager::get_peers_info(td::Bits256 hash, + td::Promise> promise) { + TRY_RESULT_PROMISE(promise, entry, get_torrent(hash)); + td::actor::send_closure(entry->actor, &NodeActor::get_peers_info, std::move(promise)); +} diff --git a/storage/storage-daemon/StorageManager.h b/storage/storage-daemon/StorageManager.h new file mode 100644 index 00000000..38e371d0 --- /dev/null +++ b/storage/storage-daemon/StorageManager.h @@ -0,0 +1,107 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#pragma once +#include "td/actor/actor.h" +#include "adnl/adnl.h" +#include "rldp2/rldp.h" +#include "overlay/overlays.h" +#include "storage/PeerManager.h" +#include "storage/db.h" + +using namespace ton; + +class StorageManager : public td::actor::Actor { + public: + class Callback { + public: + virtual ~Callback() = default; + virtual void on_ready() = 0; + }; + + StorageManager(adnl::AdnlNodeIdShort local_id, std::string db_root, td::unique_ptr callback, + bool client_mode, td::actor::ActorId adnl, td::actor::ActorId rldp, + td::actor::ActorId overlays); + + void start_up() override; + + void add_torrent(Torrent torrent, bool start_download, td::Promise promise); + void add_torrent_by_meta(TorrentMeta meta, std::string root_dir, bool start_download, td::Promise promise); + void add_torrent_by_hash(td::Bits256 hash, std::string root_dir, bool start_download, td::Promise promise); + + void set_active_download(td::Bits256 hash, bool active, td::Promise promise); + + void with_torrent(td::Bits256 hash, td::Promise promise); + void get_all_torrents(td::Promise> promise); + + void set_all_files_priority(td::Bits256 hash, td::uint8 priority, td::Promise promise); + void set_file_priority_by_idx(td::Bits256 hash, size_t idx, td::uint8 priority, td::Promise promise); + void set_file_priority_by_name(td::Bits256 hash, std::string name, td::uint8 priority, td::Promise promise); + + void remove_torrent(td::Bits256 hash, bool remove_files, td::Promise promise); + void load_from(td::Bits256 hash, td::optional meta, std::string files_path, + td::Promise promise); + + void wait_for_completion(td::Bits256 hash, td::Promise promise); + void get_peers_info(td::Bits256 hash, td::Promise> promise); + + private: + adnl::AdnlNodeIdShort local_id_; + std::string db_root_; + td::unique_ptr callback_; + bool client_mode_ = false; + td::actor::ActorId adnl_; + td::actor::ActorId rldp_; + td::actor::ActorId overlays_; + + std::shared_ptr db_; + + struct TorrentEntry { + td::Bits256 
hash; + td::actor::ActorOwn actor; + td::actor::ActorOwn peer_manager; + + struct ClosingState { + bool removing = false; + td::Promise promise; + bool remove_files = false; + }; + std::shared_ptr closing_state = std::make_shared(); + }; + + std::map torrents_; + + td::Status add_torrent_impl(Torrent torrent, bool start_download); + + td::Result get_torrent(td::Bits256 hash) { + auto it = torrents_.find(hash); + if (it == torrents_.end()) { + return td::Status::Error("No such torrent"); + } + return &it->second; + } + + td::unique_ptr create_callback(td::Bits256 hash, + std::shared_ptr closing_state); + + void load_torrents_from_db(std::vector torrents); + void loaded_torrent_from_db(td::Bits256 hash, td::Result> R); + void after_load_torrents_from_db(); + void db_store_torrent_list(); + + void on_torrent_closed(Torrent torrent, std::shared_ptr closing_state); +}; \ No newline at end of file diff --git a/storage/storage-daemon/StorageProvider.cpp b/storage/storage-daemon/StorageProvider.cpp new file mode 100644 index 00000000..013327c9 --- /dev/null +++ b/storage/storage-daemon/StorageProvider.cpp @@ -0,0 +1,840 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#include "StorageProvider.h" +#include "td/db/RocksDb.h" +#include "td/utils/JsonBuilder.h" +#include "auto/tl/ton_api_json.h" +#include "td/utils/port/path.h" +#include "block/block-auto.h" +#include "common/delay.h" +#include "td/actor/MultiPromise.h" + +td::Result ProviderParams::create(const tl_object_ptr& obj) { + ProviderParams p; + p.accept_new_contracts = obj->accept_new_contracts_; + p.rate_per_mb_day = td::string_to_int256(obj->rate_per_mb_day_); + if (p.rate_per_mb_day.is_null() || p.rate_per_mb_day->sgn() < 0) { + return td::Status::Error("Invalid rate"); + } + p.max_span = obj->max_span_; + p.minimal_file_size = obj->minimal_file_size_; + p.maximal_file_size = obj->maximal_file_size_; + return p; +} + +tl_object_ptr ProviderParams::tl() const { + return create_tl_object( + accept_new_contracts, rate_per_mb_day->to_dec_string(), max_span, minimal_file_size, maximal_file_size); +} + +bool ProviderParams::to_builder(vm::CellBuilder& b) const { + return b.store_long_bool(accept_new_contracts, 1) && store_coins(b, rate_per_mb_day) && + b.store_long_bool(max_span, 32) && b.store_long_bool(minimal_file_size, 64) && + b.store_long_bool(maximal_file_size, 64); +} + +StorageProvider::StorageProvider(ContractAddress account_address, std::string db_root, + td::actor::ActorId tonlib_client, + td::actor::ActorId storage_manager, + td::actor::ActorId keyring) + + : main_address_(account_address) + , db_root_(std::move(db_root)) + , tonlib_client_(std::move(tonlib_client)) + , storage_manager_(std::move(storage_manager)) + , keyring_(std::move(keyring)) { +} + +void StorageProvider::start_up() { + LOG(INFO) << "Initing storage provider, account address: " << main_address_.to_string(); + td::mkdir(db_root_).ensure(); + db_ = std::make_unique(td::RocksDb::open(db_root_).move_as_ok()); + + auto r_state = db::db_get( + *db_, create_hash_tl_object(), true); + r_state.ensure(); + auto state = r_state.move_as_ok(); + if (state) { + last_processed_lt_ = 
state->last_processed_lt_; + LOG(INFO) << "Loaded storage provider state"; + LOG(INFO) << "Last processed lt: " << last_processed_lt_; + } + + class Callback : public FabricContractWrapper::Callback { + public: + explicit Callback(td::actor::ActorId id) : id_(std::move(id)) { + } + void on_transaction(tl_object_ptr transaction) override { + td::actor::send_closure(id_, &StorageProvider::process_transaction, std::move(transaction)); + } + + private: + td::actor::ActorId id_; + }; + contract_wrapper_ = + td::actor::create_actor("ContractWrapper", main_address_, tonlib_client_, keyring_, + td::make_unique(actor_id(this)), last_processed_lt_); + + auto r_config = db::db_get( + *db_, create_hash_tl_object(), true); + r_config.ensure(); + auto config_obj = r_config.move_as_ok(); + if (config_obj) { + LOG(INFO) << "Loaded config from db"; + config_ = Config(config_obj); + } else { + LOG(INFO) << "Using default config"; + db_store_config(); + } + LOG(INFO) << "Config: max_contracts=" << config_.max_contracts << ", max_total_size=" << config_.max_total_size; + + auto r_contract_list = db::db_get( + *db_, create_hash_tl_object(), true); + r_contract_list.ensure(); + auto contract_list = r_contract_list.move_as_ok(); + if (contract_list) { + LOG(INFO) << "Loading " << contract_list->contracts_.size() << " contracts from db"; + for (auto& c : contract_list->contracts_) { + ContractAddress address(c->wc_, c->addr_); + if (contracts_.count(address)) { + LOG(ERROR) << "Duplicate contract in db: " << address.to_string(); + continue; + } + auto r_contract = db::db_get( + *db_, create_hash_tl_object(address.wc, address.addr), + true); + r_contract.ensure(); + auto db_contract = r_contract.move_as_ok(); + if (!db_contract) { + LOG(ERROR) << "Missing contract in db: " << address.to_string(); + continue; + } + StorageContract& contract = contracts_[address]; + contract.torrent_hash = db_contract->torrent_hash_; + contract.microchunk_hash = db_contract->microchunk_hash_; + 
contract.created_time = db_contract->created_time_; + contract.state = (StorageContract::State)db_contract->state_; + contract.file_size = db_contract->file_size_; + contract.max_span = db_contract->max_span_; + contract.rate = td::string_to_int256(db_contract->rate_); + contracts_total_size_ += contract.file_size; + + auto r_tree = db::db_get( + *db_, create_hash_tl_object(address.wc, address.addr), true); + r_tree.ensure(); + auto tree = r_tree.move_as_ok(); + if (tree) { + contract.microchunk_tree = std::make_shared(vm::std_boc_deserialize(tree->data_).move_as_ok()); + } + + LOG(INFO) << "Loaded contract from db: " << address.to_string() << ", torrent=" << contract.torrent_hash.to_hex() + << ", state=" << contract.state; + } + } + + for (auto& p : contracts_) { + const ContractAddress& address = p.first; + StorageContract& contract = p.second; + switch (contract.state) { + case StorageContract::st_downloading: + init_new_storage_contract(address, contract); + break; + case StorageContract::st_downloaded: + check_contract_active(address); + break; + case StorageContract::st_active: + contract.check_next_proof_at = td::Timestamp::now(); + break; + case StorageContract::st_closing: + check_storage_contract_deleted(address); + break; + default: + LOG(FATAL) << "Invalid contract state in db"; + } + } + LOG(INFO) << "Loaded contracts from db"; + + alarm(); +} + +void StorageProvider::get_params(td::Promise promise) { + return get_provider_params(tonlib_client_, main_address_, std::move(promise)); +} + +void StorageProvider::get_provider_params(td::actor::ActorId client, + ContractAddress address, td::Promise promise) { + run_get_method( + address, client, "get_storage_params", std::vector>(), + promise.wrap([](std::vector> stack) -> td::Result { + if (stack.size() != 5) { + return td::Status::Error(PSTRING() << "Method returned " << stack.size() << " values, 5 expected"); + } + TRY_RESULT_PREFIX(accept_new_contracts, entry_to_int(stack[0]), "Invalid 
accept_new_contracts: "); + TRY_RESULT_PREFIX(rate_per_mb_day, entry_to_int(stack[1]), "Invalid rate_per_mb_day: "); + TRY_RESULT_PREFIX(max_span, entry_to_int(stack[2]), "Invalid max_span: "); + TRY_RESULT_PREFIX(minimal_file_size, entry_to_int(stack[3]), "Invalid minimal_file_size: "); + TRY_RESULT_PREFIX(maximal_file_size, entry_to_int(stack[4]), "Invalid maximal_file_size: "); + ProviderParams params; + params.accept_new_contracts = accept_new_contracts; + params.rate_per_mb_day = rate_per_mb_day; + params.max_span = max_span; + params.minimal_file_size = minimal_file_size; + params.maximal_file_size = maximal_file_size; + return params; + })); +} + +void StorageProvider::set_params(ProviderParams params, td::Promise promise) { + vm::CellBuilder b; + b.store_long(0x54cbf19b, 32); // const op::update_storage_params = 0x54cbf19b; + b.store_long(0, 64); // query_id + if (!params.to_builder(b)) { + promise.set_error(td::Status::Error("Failed to store params to builder")); + return; + } + LOG(INFO) << "Sending external message to update provider parameters: " << params.accept_new_contracts << ", " + << params.max_span << ", " << params.rate_per_mb_day << ", " << params.minimal_file_size << ", " + << params.maximal_file_size; + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, main_address_, + td::make_refint(100'000'000), b.as_cellslice(), std::move(promise)); +} + +void StorageProvider::db_store_state() { + LOG(DEBUG) << "db_store_state last_lt=" << last_processed_lt_; + db_->begin_transaction().ensure(); + db_->set(create_hash_tl_object().as_slice(), + create_serialize_tl_object(last_processed_lt_)) + .ensure(); + db_->commit_transaction().ensure(); +} + +void StorageProvider::db_store_config() { + LOG(DEBUG) << "db_store_config"; + db_->begin_transaction().ensure(); + db_->set(create_hash_tl_object().as_slice(), + serialize_tl_object(config_.tl(), true)) + .ensure(); + db_->commit_transaction().ensure(); +} + +void 
StorageProvider::alarm() { + for (auto& p : contracts_) { + if (p.second.check_next_proof_at && p.second.check_next_proof_at.is_in_past()) { + p.second.check_next_proof_at = td::Timestamp::never(); + check_next_proof(p.first, p.second); + } + alarm_timestamp().relax(p.second.check_next_proof_at); + } +} + +void StorageProvider::process_transaction(tl_object_ptr transaction) { + std::string new_contract_address; + for (auto& message : transaction->out_msgs_) { + auto data = dynamic_cast(message->msg_data_.get()); + if (data == nullptr) { + continue; + } + auto r_body = vm::std_boc_deserialize(data->body_); + if (r_body.is_error()) { + LOG(ERROR) << "Invalid message body in tonlib response: " << r_body.move_as_error(); + continue; + } + td::Ref body = r_body.move_as_ok(); + vm::CellSlice cs = vm::load_cell_slice(body); + // const op::offer_storage_contract = 0x107c49ef; + if (cs.size() >= 32 && cs.prefetch_long(32) == 0x107c49ef) { + new_contract_address = message->destination_->account_address_; + } + } + if (!new_contract_address.empty()) { + auto P = td::PromiseCreator::lambda( + [SelfId = actor_id(this), lt = (td::uint64)transaction->transaction_id_->lt_](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Error during processing new storage contract, skipping: " << R.move_as_error(); + } + }); + on_new_storage_contract(ContractAddress::parse(new_contract_address).move_as_ok(), std::move(P)); + } + + last_processed_lt_ = transaction->transaction_id_->lt_; + db_store_state(); +} + +void StorageProvider::on_new_storage_contract(ContractAddress address, td::Promise promise, int max_retries) { + LOG(INFO) << "Processing new storage contract: " << address.to_string(); + get_storage_contract_data( + address, tonlib_client_, + [SelfId = actor_id(this), address, promise = std::move(promise), + max_retries](td::Result R) mutable { + if (R.is_error()) { + if (max_retries > 0) { + LOG(WARNING) << "Processing new storage contract: " << R.move_as_error() << ", retrying"; + 
delay_action( + [SelfId = std::move(SelfId), promise = std::move(promise), address = std::move(address), + max_retries]() mutable { + td::actor::send_closure(SelfId, &StorageProvider::on_new_storage_contract, std::move(address), + std::move(promise), max_retries - 1); + }, + td::Timestamp::in(5.0)); + } else { + promise.set_error(R.move_as_error()); + } + return; + } + td::actor::send_closure(SelfId, &StorageProvider::on_new_storage_contract_cont, address, R.move_as_ok(), + std::move(promise)); + }); +} + +void StorageProvider::on_new_storage_contract_cont(ContractAddress address, StorageContractData data, + td::Promise promise) { + auto it = contracts_.emplace(address, StorageContract()); + if (!it.second) { + promise.set_error(td::Status::Error(PSTRING() << "Storage contract already registered: " << address.to_string())); + return; + } + LOG(INFO) << "New storage contract " << address.to_string() << ", torrent hash: " << data.torrent_hash.to_hex(); + LOG(DEBUG) << "Stoage contract data: microchunk_hash=" << data.microchunk_hash << ", balance=" << data.balance + << ", file_size=" << data.file_size << ", next_proof=" << data.next_proof + << ", rate=" << data.rate_per_mb_day << ", max_span=" << data.max_span; + StorageContract& contract = it.first->second; + contract.torrent_hash = data.torrent_hash; + contract.microchunk_hash = data.microchunk_hash; + contract.state = StorageContract::st_downloading; + contract.created_time = (td::uint32)td::Clocks::system(); + contract.file_size = data.file_size; + contract.max_span = data.max_span; + contract.rate = data.rate_per_mb_day; + contracts_total_size_ += contract.file_size; + promise.set_result(td::Unit()); + + if (contracts_.size() <= config_.max_contracts && contracts_total_size_ <= config_.max_total_size) { + db_update_storage_contract(address, true); + init_new_storage_contract(address, contract); + } else { + if (contracts_.size() > config_.max_contracts) { + LOG(WARNING) << "Cannot add new storage contract: too 
many contracts (limit = " << config_.max_contracts << ")"; + } else { + LOG(WARNING) << "Cannot add new storage contract: total size exceeded (limit = " + << td::format::as_size(config_.max_total_size) << ")"; + } + contract.state = StorageContract::st_closing; + db_update_storage_contract(address, true); + do_close_storage_contract(address); + } +} + +void StorageProvider::db_update_storage_contract(const ContractAddress& address, bool update_list) { + LOG(DEBUG) << "db_update_storage_contract " << address.to_string() << " " << update_list; + db_->begin_transaction().ensure(); + if (update_list) { + std::vector> list; + for (const auto& t : contracts_) { + list.push_back(create_tl_object(t.first.wc, t.first.addr)); + } + db_->set(create_hash_tl_object().as_slice(), + create_serialize_tl_object(std::move(list))) + .ensure(); + } + auto key = create_hash_tl_object(address.wc, address.addr); + auto it = contracts_.find(address); + if (it == contracts_.end()) { + db_->erase(key.as_slice()).ensure(); + } else { + const StorageContract& contract = it->second; + db_->set(key.as_slice(), + create_serialize_tl_object( + contract.torrent_hash, contract.microchunk_hash, contract.created_time, (int)contract.state, + contract.file_size, contract.rate->to_dec_string(), contract.max_span)); + } + db_->commit_transaction().ensure(); +} + +void StorageProvider::db_update_microchunk_tree(const ContractAddress& address) { + LOG(DEBUG) << "db_update_microchunk_tree " << address.to_string(); + db_->begin_transaction().ensure(); + auto key = create_hash_tl_object(address.wc, address.addr); + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.microchunk_tree == nullptr) { + db_->erase(key.as_slice()).ensure(); + } else { + db_->set(key.as_slice(), create_serialize_tl_object( + vm::std_boc_serialize(it->second.microchunk_tree->get_root()).move_as_ok())); + } + db_->commit_transaction().ensure(); +} + +void 
StorageProvider::init_new_storage_contract(ContractAddress address, StorageContract& contract) { + CHECK(contract.state == StorageContract::st_downloading); + td::actor::send_closure(storage_manager_, &StorageManager::add_torrent_by_hash, contract.torrent_hash, "", false, + [](td::Result R) { + // Ignore errors: error can mean that the torrent already exists, other errors will be caught later + if (R.is_error()) { + LOG(DEBUG) << "Add torrent: " << R.move_as_error(); + } else { + LOG(DEBUG) << "Add torrent: OK"; + } + }); + td::actor::send_closure(storage_manager_, &StorageManager::set_active_download, contract.torrent_hash, true, + [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to init storage contract: " << R.move_as_error(); + td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address); + return; + } + LOG(DEBUG) << "Set active download: OK"; + }); + td::actor::send_closure( + storage_manager_, &StorageManager::wait_for_completion, contract.torrent_hash, + [SelfId = actor_id(this), address, hash = contract.torrent_hash, microchunk_hash = contract.microchunk_hash, + manager = storage_manager_](td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to download torrent " << hash.to_hex() << ": " << R.move_as_error(); + td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address); + return; + } + LOG(DEBUG) << "Downloaded torrent " << hash; + td::actor::send_closure( + manager, &StorageManager::with_torrent, hash, + [SelfId, address, hash, microchunk_hash](td::Result R) { + auto r_microchunk_tree = [&]() -> td::Result { + TRY_RESULT(state, std::move(R)); + Torrent& torrent = state.torrent; + if (!torrent.is_completed() || torrent.get_included_size() != torrent.get_info().file_size) { + return td::Status::Error("unknown error"); + } + LOG(DEBUG) << "Building microchunk tree for " << hash; + TRY_RESULT(tree, 
MicrochunkTree::Builder::build_for_torrent(torrent)); + if (tree.get_root_hash() != microchunk_hash) { + return td::Status::Error("microchunk tree hash mismatch"); + } + return tree; + }(); + if (r_microchunk_tree.is_error()) { + LOG(WARNING) << "Failed to download torrent " << hash.to_hex() << ": " << R.move_as_error(); + td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address); + } else { + td::actor::send_closure(SelfId, &StorageProvider::downloaded_torrent, address, + r_microchunk_tree.move_as_ok()); + } + }); + }); +} + +void StorageProvider::downloaded_torrent(ContractAddress address, MicrochunkTree microchunk_tree) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore"; + return; + } + auto& contract = it->second; + LOG(INFO) << "Finished downloading torrent " << contract.torrent_hash.to_hex() << " for contract " + << address.to_string(); + contract.state = StorageContract::st_downloaded; + contract.microchunk_tree = std::make_shared(std::move(microchunk_tree)); + db_update_microchunk_tree(address); + db_update_storage_contract(address, false); + check_contract_active(address); +} + +void StorageProvider::check_contract_active(ContractAddress address, td::Timestamp retry_until, + td::Timestamp retry_false_until) { + get_storage_contract_data(address, tonlib_client_, + [=, SelfId = actor_id(this)](td::Result R) mutable { + if (R.is_error()) { + LOG(WARNING) << "Failed to check that contract is active: " << R.move_as_error(); + if (retry_until && retry_until.is_in_past()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, + address, retry_until, retry_false_until); + }, + td::Timestamp::in(5.0)); + } + return; + } + if (R.ok().active) { + td::actor::send_closure(SelfId, &StorageProvider::activated_storage_contract, address); + } else if (retry_false_until && 
retry_false_until.is_in_past()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, address, + retry_until, retry_false_until); + }, + td::Timestamp::in(5.0)); + } else { + td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); + } + }); +} + +void StorageProvider::activate_contract_cont(ContractAddress address) { + vm::CellBuilder b; + b.store_long(0x7a361688, 32); // const op::accept_storage_contract = 0x7a361688; + b.store_long(0, 64); // query_id + LOG(DEBUG) << "Sending op::accept_storage_contract to " << address.to_string(); + td::actor::send_closure( + contract_wrapper_, &FabricContractWrapper::send_internal_message, address, td::make_refint(100'000'000), + b.as_cellslice(), [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to send activate message, retrying later: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); }, + td::Timestamp::in(10.0)); + return; + } + td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, address, td::Timestamp::in(60.0), + td::Timestamp::in(40.0)); + }); +} + +void StorageProvider::activated_storage_contract(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore"; + return; + } + LOG(INFO) << "Storage contract " << address.to_string() << " is active"; + auto& contract = it->second; + contract.state = StorageContract::st_active; + db_update_storage_contract(address, false); + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(1.0)); +} + +void StorageProvider::do_close_storage_contract(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore"; + 
return; + } + LOG(INFO) << "Closing storage contract " << address.to_string(); + auto& contract = it->second; + contract.state = StorageContract::st_closing; + db_update_storage_contract(address, false); + check_storage_contract_deleted(address); +} + +void StorageProvider::send_close_storage_contract(ContractAddress address) { + vm::CellBuilder b; + b.store_long(0x79f937ea, 32); // const op::close_contract = 0x79f937ea; + b.store_long(0, 64); // query_id + LOG(DEBUG) << "Sending op::close_contract to " << address.to_string(); + td::actor::send_closure( + contract_wrapper_, &FabricContractWrapper::send_internal_message, address, td::make_refint(100'000'000), + b.as_cellslice(), [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to send close message, retrying later: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); }, + td::Timestamp::in(10.0)); + return; + } + td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address, + td::Timestamp::in(40.0)); + }); +} + +void StorageProvider::check_storage_contract_deleted(ContractAddress address, td::Timestamp retry_false_until) { + check_contract_exists(address, tonlib_client_, [=, SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address, + retry_false_until); + }, + td::Timestamp::in(10.0)); + return; + } + if (!R.move_as_ok()) { + td::actor::send_closure(SelfId, &StorageProvider::storage_contract_deleted, address); + } else if (retry_false_until && !retry_false_until.is_in_past()) { + delay_action( + [=]() { + td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address, + retry_false_until); + }, + td::Timestamp::in(5.0)); + } else { + td::actor::send_closure(SelfId, &StorageProvider::send_close_storage_contract, 
address); + } + }); +} + +void StorageProvider::storage_contract_deleted(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end()) { + return; + } + LOG(INFO) << "Storage contract " << address.to_string() << " was deleted"; + td::Bits256 hash = it->second.torrent_hash; + contracts_total_size_ -= it->second.file_size; + contracts_.erase(it); + bool delete_torrent = true; + for (const auto& p : contracts_) { + if (p.second.torrent_hash == hash) { + delete_torrent = false; + break; + } + } + if (delete_torrent) { + LOG(INFO) << "Deleting torrent " << hash.to_hex(); + td::actor::send_closure(storage_manager_, &StorageManager::remove_torrent, hash, true, + [](td::Result R) {}); + } + db_update_storage_contract(address, true); +} + +void StorageProvider::check_next_proof(ContractAddress address, StorageContract& contract) { + if (contract.state != StorageContract::st_active) { + return; + } + CHECK(contract.microchunk_tree != nullptr); + get_storage_contract_data( + address, tonlib_client_, [SelfId = actor_id(this), address](td::Result R) { + td::actor::send_closure(SelfId, &StorageProvider::got_next_proof_info, address, std::move(R)); + }); +} + +void StorageProvider::got_next_proof_info(ContractAddress address, td::Result R) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + return; + } + auto& contract = it->second; + if (R.is_error()) { + LOG(ERROR) << "get_next_proof_info for " << address.to_string() << ": " << R.move_as_error(); + check_contract_exists(address, tonlib_client_, [SelfId = actor_id(this), address](td::Result R) { + td::actor::send_closure(SelfId, &StorageProvider::got_contract_exists, address, std::move(R)); + }); + return; + } + auto data = R.move_as_ok(); + if (data.balance->sgn() == 0) { + LOG(INFO) << "Balance of contract " << address.to_string() << " is zero, closing"; + do_close_storage_contract(address); + return; + } + td::uint32 
send_at = data.last_proof_time + data.max_span / 2, now = (td::uint32)td::Clocks::system(); + if (now < send_at) { + LOG(DEBUG) << "Will send proof in " << send_at - now << "s (last_proof_time=" << data.last_proof_time + << ", max_span=" << data.max_span << ")"; + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(send_at - now + 2)); + return; + } + + LOG(INFO) << "Sending proof for " << address.to_string() << ": next_proof=" << data.next_proof + << ", max_span=" << data.max_span << ", last_proof_time=" << data.last_proof_time << " (" + << now - data.last_proof_time << "s ago)"; + td::actor::send_closure( + storage_manager_, &StorageManager::with_torrent, contract.torrent_hash, + [=, SelfId = actor_id(this), tree = contract.microchunk_tree](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Missing torrent for " << address.to_string(); + return; + } + auto state = R.move_as_ok(); + td::uint64 l = data.next_proof / MicrochunkTree::MICROCHUNK_SIZE * MicrochunkTree::MICROCHUNK_SIZE; + td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE; + auto proof = tree->get_proof(l, r, state.torrent); + td::actor::send_closure(SelfId, &StorageProvider::got_next_proof, address, std::move(proof)); + }); +} + +void StorageProvider::got_contract_exists(ContractAddress address, td::Result R) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + return; + } + auto& contract = it->second; + if (R.is_error()) { + LOG(ERROR) << "Check contract exists for " << address.to_string() << ": " << R.move_as_error(); + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(10.0)); + return; + } + if (R.ok()) { + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(10.0)); + return; + } + storage_contract_deleted(address); +} + +void StorageProvider::got_next_proof(ContractAddress address, td::Result> R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to build proof: 
" << R.move_as_error(); + return; + } + LOG(INFO) << "Got proof, sending"; + + vm::CellBuilder b; + b.store_long(0x419d5d4d, 32); // const op::proof_storage = 0x419d5d4d; + b.store_long(0, 64); // query_id + b.store_ref(R.move_as_ok()); + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, address, + td::make_refint(100'000'000), b.as_cellslice(), + [SelfId = actor_id(this), address](td::Result R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to send proof message: " << R.move_as_error(); + } else { + LOG(DEBUG) << "Proof for " << address.to_string() << " was sent"; + } + td::actor::send_closure(SelfId, &StorageProvider::sent_next_proof, address); + }); +} + +void StorageProvider::sent_next_proof(ContractAddress address) { + auto it = contracts_.find(address); + if (it == contracts_.end() || it->second.state != StorageContract::st_active) { + return; + } + auto& contract = it->second; + alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(30.0)); +} + +void StorageProvider::get_provider_info(bool with_balances, bool with_contracts, + td::Promise> promise) { + auto result = std::make_shared(); + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise(promise.wrap( + [result](td::Unit) { return create_tl_object(std::move(*result)); })); + result->address_ = main_address_.to_string(); + result->config_ = config_.tl(); + result->contracts_count_ = (int)contracts_.size(); + result->contracts_total_size_ = contracts_total_size_; + if (with_balances) { + get_contract_balance(main_address_, tonlib_client_, ig.get_promise().wrap([result](td::RefInt256 balance) { + result->balance_ = balance->to_dec_string(); + return td::Unit(); + })); + } else { + result->balance_ = "-1"; + } + if (with_contracts) { + for (const auto& p : contracts_) { + auto obj = create_tl_object(); + const StorageContract& contract = p.second; + obj->address_ = p.first.to_string(); + obj->state_ = (int)contract.state; + obj->torrent_ 
= contract.torrent_hash; + obj->created_time_ = contract.created_time; + obj->rate_ = contract.rate->to_dec_string(); + obj->max_span_ = contract.max_span; + obj->file_size_ = contract.file_size; + obj->downloaded_size_ = obj->file_size_; + obj->client_balance_ = "-1"; + obj->contract_balance_ = "-1"; + result->contracts_.push_back(std::move(obj)); + } + size_t i = 0; + for (const auto& p : contracts_) { + const StorageContract& contract = p.second; + if (contract.state == StorageContract::st_downloading) { + td::actor::send_closure(storage_manager_, &StorageManager::with_torrent, contract.torrent_hash, + [i, result, promise = ig.get_promise()](td::Result R) mutable { + if (R.is_error()) { + result->contracts_[i]->downloaded_size_ = 0; + } else { + auto state = R.move_as_ok(); + result->contracts_[i]->downloaded_size_ = state.torrent.get_included_ready_size(); + } + promise.set_result(td::Unit()); + }); + } + if (with_balances) { + get_contract_balance(p.first, tonlib_client_, + [i, result, promise = ig.get_promise()](td::Result R) mutable { + if (R.is_ok()) { + result->contracts_[i]->contract_balance_ = R.ok()->to_dec_string(); + } + promise.set_result(td::Unit()); + }); + get_storage_contract_data(p.first, tonlib_client_, + [i, result, promise = ig.get_promise()](td::Result R) mutable { + auto S = [&]() -> td::Status { + TRY_RESULT(data, std::move(R)); + result->contracts_[i]->client_balance_ = data.balance->to_dec_string(); + return td::Status::OK(); + }(); + promise.set_result(td::Unit()); + }); + } + i += 1; + } + } +} + +void StorageProvider::set_provider_config(Config config, td::Promise promise) { + config_ = config; + LOG(INFO) << "Changing provider config: max_contracts=" << config_.max_contracts + << ", max_total_size=" << config_.max_total_size; + db_store_config(); + promise.set_result(td::Unit()); +} + +void StorageProvider::withdraw(ContractAddress address, td::Promise promise) { + auto it = contracts_.find(address); + if (it == contracts_.end() || 
it->second.state != StorageContract::st_active) { + promise.set_error(td::Status::Error("No such storage contract")); + return; + } + if (it->second.state != StorageContract::st_active) { + promise.set_error(td::Status::Error("Storage contract is not active")); + return; + } + vm::CellBuilder b; + b.store_long(0x46ed2e94, 32); // const op::withdraw = 0x46ed2e94; + b.store_long(0, 64); // query_id + LOG(INFO) << "Sending op::withdraw to storage contract " << address.to_string(); + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, address, + td::make_refint(100'000'000), b.as_cellslice(), std::move(promise)); +} + +void StorageProvider::send_coins(ContractAddress dest, td::RefInt256 amount, std::string message, + td::Promise promise) { + if (amount->sgn() < 0) { + promise.set_error(td::Status::Error("Amount is negative")); + return; + } + vm::CellBuilder b; + if (!message.empty()) { + b.store_long(0, 32); + if (b.remaining_bits() < message.size() * 8) { + promise.set_error(td::Status::Error("Message is too long (max 122 bytes)")); + return; + } + b.store_bytes(td::Slice(message)); + } + LOG(INFO) << "Sending " << amount << " nanoTON to " << dest.to_string(); + td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, dest, amount, + b.finalize_novm(), std::move(promise)); +} + +void StorageProvider::close_storage_contract(ContractAddress address, td::Promise promise) { + if (!contracts_.count(address)) { + promise.set_error(td::Status::Error("No such storage contract")); + return; + } + do_close_storage_contract(address); + promise.set_result(td::Unit()); +} + +StorageProvider::Config::Config(const tl_object_ptr& obj) + : max_contracts(obj->max_contracts_), max_total_size(obj->max_total_size_) { +} + +tl_object_ptr StorageProvider::Config::tl() const { + return create_tl_object(max_contracts, max_total_size); +} diff --git a/storage/storage-daemon/StorageProvider.h 
b/storage/storage-daemon/StorageProvider.h new file mode 100644 index 00000000..a7d63f88 --- /dev/null +++ b/storage/storage-daemon/StorageProvider.h @@ -0,0 +1,127 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#pragma once +#include "td/actor/actor.h" +#include "storage/db.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" +#include "StorageManager.h" +#include "keyring/keyring.h" +#include "smc-util.h" +#include "storage/MicrochunkTree.h" + +using namespace ton; + +struct ProviderParams { + bool accept_new_contracts = false; + td::RefInt256 rate_per_mb_day = td::zero_refint(); + td::uint32 max_span = 0; + td::uint64 minimal_file_size = 0; + td::uint64 maximal_file_size = 0; + + static td::Result create(const tl_object_ptr& obj); + tl_object_ptr tl() const; + bool to_builder(vm::CellBuilder& b) const; +}; + +class StorageProvider : public td::actor::Actor { + public: + struct Config { + td::uint32 max_contracts = 1000; + td::uint64 max_total_size = 128LL << 30; + Config() = default; + explicit Config(const tl_object_ptr& obj); + tl_object_ptr tl() const; + }; + + StorageProvider(ContractAddress address, std::string db_root, + td::actor::ActorId tonlib_client, + td::actor::ActorId storage_manager, td::actor::ActorId keyring); + + void start_up() override; + void alarm() override; + void 
get_params(td::Promise promise); + static void get_provider_params(td::actor::ActorId, ContractAddress address, + td::Promise promise); + void set_params(ProviderParams params, td::Promise promise); + + void get_provider_info(bool with_balances, bool with_contracts, + td::Promise> promise); + void set_provider_config(Config config, td::Promise promise); + void withdraw(ContractAddress address, td::Promise promise); + void send_coins(ContractAddress dest, td::RefInt256 amount, std::string message, td::Promise promise); + void close_storage_contract(ContractAddress address, td::Promise promise); + + private: + ContractAddress main_address_; + std::string db_root_; + td::actor::ActorId tonlib_client_; + td::actor::ActorId storage_manager_; + td::actor::ActorId keyring_; + td::Promise init_promise_; + Config config_; + + std::unique_ptr db_; + td::actor::ActorOwn contract_wrapper_; + td::uint64 last_processed_lt_ = 0; + + struct StorageContract { + enum State { st_downloading = 0, st_downloaded = 1, st_active = 2, st_closing = 3 }; + td::Bits256 torrent_hash; + td::Bits256 microchunk_hash; + td::uint32 created_time; + State state; + + td::uint64 file_size = 0; + td::uint32 max_span = 0; + td::RefInt256 rate = td::zero_refint(); + + // TODO: Compute and store only one tree for duplicating torrents + std::shared_ptr microchunk_tree; + + td::Timestamp check_next_proof_at = td::Timestamp::never(); + }; + std::map contracts_; + td::uint64 contracts_total_size_ = 0; + + void process_transaction(tl_object_ptr transaction); + + void db_store_state(); + void db_store_config(); + void db_update_storage_contract(const ContractAddress& address, bool update_list); + void db_update_microchunk_tree(const ContractAddress& address); + + void on_new_storage_contract(ContractAddress address, td::Promise promise, int max_retries = 10); + void on_new_storage_contract_cont(ContractAddress address, StorageContractData data, td::Promise promise); + void 
init_new_storage_contract(ContractAddress address, StorageContract& contract); + void downloaded_torrent(ContractAddress address, MicrochunkTree microchunk_tree); + void check_contract_active(ContractAddress address, td::Timestamp retry_until = td::Timestamp::in(30.0), + td::Timestamp retry_false_until = td::Timestamp::never()); + void activate_contract_cont(ContractAddress address); + void activated_storage_contract(ContractAddress address); + void do_close_storage_contract(ContractAddress address); + void check_storage_contract_deleted(ContractAddress address, + td::Timestamp retry_false_until = td::Timestamp::never()); + void send_close_storage_contract(ContractAddress address); + void storage_contract_deleted(ContractAddress address); + + void check_next_proof(ContractAddress address, StorageContract& contract); + void got_next_proof_info(ContractAddress address, td::Result R); + void got_contract_exists(ContractAddress address, td::Result R); + void got_next_proof(ContractAddress address, td::Result> R); + void sent_next_proof(ContractAddress address); +}; diff --git a/storage/storage-daemon/smartcont/.gitignore b/storage/storage-daemon/smartcont/.gitignore new file mode 100644 index 00000000..5fa1d61f --- /dev/null +++ b/storage/storage-daemon/smartcont/.gitignore @@ -0,0 +1 @@ +provider-code.h diff --git a/storage/storage-daemon/smartcont/README.md b/storage/storage-daemon/smartcont/README.md new file mode 100644 index 00000000..e8950bda --- /dev/null +++ b/storage/storage-daemon/smartcont/README.md @@ -0,0 +1,23 @@ +# Storage Provider +Simple smart-contract system for conclusion of a storage agreements. +- guarantees that the provider stores the file +- no storage - no payment +- no penalties, if provider doesn't store file client can stop payment at any time +- no control that provider upload the file: client can stop payment at any time if not satisfied + +## Storage Agreements Fabric + +Storage provider deploy storage agreements fabric. 
Any client may request fabric to deploy storage agreement contract. +Fabric provides get-method `get_storage_params` which returns +- `accept_new_contracts?` - whether provider accepts new contracts +- `rate_per_mb_day` - price in nanoTON per Megabyte per day +- `max_span` - maximal timespan between proving file storage which will be paid +- `minimal_file_size` - minimal file size accepted by provider +- `maximal_file_size` - maximal file size accepted by provider + +## Storage agreement +Agreement contract has client account and accept deposits to this account. + +It also knows merkle root and allows provider to withdraw money from client account by providing merkle proof of file storage. + +Client can stop agreement at any time. diff --git a/storage/storage-daemon/smartcont/build.sh b/storage/storage-daemon/smartcont/build.sh new file mode 100755 index 00000000..3ebd3aa4 --- /dev/null +++ b/storage/storage-daemon/smartcont/build.sh @@ -0,0 +1,4 @@ +func -SPA -o storage-contract.fif ../../../crypto/smartcont/stdlib.fc storage-contract.fc && \ + (echo "\"storage-contract.fif\" include boc>B \"storage-contract-code.boc\" B>file" | fift) && \ + func -SPA -o storage-provider.fif ../../../crypto/smartcont/stdlib.fc storage-provider.fc && \ + (echo "\"storage-provider.fif\" include boc>B \"storage-provider-code.boc\" B>file" | fift) diff --git a/storage/storage-daemon/smartcont/constants.fc b/storage/storage-daemon/smartcont/constants.fc new file mode 100644 index 00000000..479cdfa3 --- /dev/null +++ b/storage/storage-daemon/smartcont/constants.fc @@ -0,0 +1,23 @@ +const op::offer_storage_contract = 0x107c49ef; +const op::close_contract = 0x79f937ea; +const op::contract_deployed = 0xbf7bd0c1; +const op::storage_contract_confirmed = 0xd4caedcd; +const op::reward_withdrawal = 0xa91baf56; +const op::storage_contract_terminated = 0xb6236d63; +const op::accept_storage_contract = 0x7a361688; +const op::withdraw = 0x46ed2e94; +const op::proof_storage = 0x419d5d4d; + +const 
op::update_pubkey = 0x53f34cd6; +const op::update_storage_params = 0x54cbf19b; + +const error::not_enough_money = 1001; +const error::unauthorized = 401; +const error::wrong_proof = 1002; +const error::contract_not_active = 1003; +const error::file_too_small = 1004; +const error::file_too_big = 1005; +const error::no_new_contracts = 1006; +const error::contract_already_active = 1007; +const error::no_microchunk_hash = 1008; +const error::provider_params_changed = 1009; diff --git a/storage/storage-daemon/smartcont/embed-provider-code.cpp b/storage/storage-daemon/smartcont/embed-provider-code.cpp new file mode 100644 index 00000000..9cc38930 --- /dev/null +++ b/storage/storage-daemon/smartcont/embed-provider-code.cpp @@ -0,0 +1,40 @@ +#include +#include +#include + +int main(int argc, char** argv) { + if (argc != 3) { + std::cerr << "Usage: generate-provider-code in.boc out.h\n"; + return 1; + } + std::ifstream in(argv[1], std::ios_base::ate | std::ios_base::binary); + size_t size = in.tellg(); + in.seekg(0, std::ios::beg); + std::vector buf(size); + if (!in.read(buf.data(), size)) { + std::cerr << "Error: cannot read input\n"; + return 1; + } + in.close(); + + std::ofstream out(argv[2]); + out << "// Auto-generated by embed-provider-code\n"; + out << "#pragma once\n"; + out << "const unsigned char STORAGE_PROVIDER_CODE[" << size << "] = {\n "; + for (size_t i = 0; i < size; ++i) { + if (i != 0) { + out << ","; + if (i % 32 == 31) { + out << "\n "; + } + } + out << (int)(unsigned char)buf[i]; + } + out << "\n};\n"; + if (!out) { + std::cerr << "Error: cannot write output\n"; + return 1; + } + out.close(); + return 0; +} diff --git a/storage/storage-daemon/smartcont/storage-contract-code.boc b/storage/storage-daemon/smartcont/storage-contract-code.boc new file mode 100644 index 00000000..03dd18ea Binary files /dev/null and b/storage/storage-daemon/smartcont/storage-contract-code.boc differ diff --git a/storage/storage-daemon/smartcont/storage-contract.fc 
b/storage/storage-daemon/smartcont/storage-contract.fc new file mode 100644 index 00000000..41e3ee5b --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-contract.fc @@ -0,0 +1,266 @@ +#include "constants.fc"; + + +const CHUNK_SIZE = 64; +const fee::receipt_value = 20000000; +const fee::storage = 10000000; + + + +{- + storage#_ active:Bool + balance:Coins provider:MsgAddress + merkle_hash:uint256 file_size:uint64 next_proof_byte:uint64 + rate_per_mb_day:Coins + max_span:uint32 last_proof_time:uint32 + ^[client:MsgAddress torrent_hash:uint256] = Storage; +-} + +(slice, int) begin_parse_special(cell c) asm "x{D739} s,"; + +int check_proof(int merkle_hash, int byte_to_proof, int file_size, cell file_dict_proof) { + (slice cs, int special) = file_dict_proof.begin_parse_special(); + if (~ special) { + return false; + } + if (cs~load_uint(8) != 3) { ;; Merkle proof + return false; + } + if (cs~load_uint(256) != merkle_hash) { + return false; + } + cell file_dict = cs~load_ref(); + int key_len = 0; + while ((CHUNK_SIZE << key_len) < file_size) { + key_len += 1; + } + (slice data, int found?) = file_dict.udict_get?(key_len, byte_to_proof / CHUNK_SIZE); + if(found?) 
{ + return true; + } + return false; +} + +() add_to_balance(int amount) impure inline_ref { + var ds = get_data().begin_parse(); + var (active, balance, residue) = (ds~load_int(1), ds~load_grams(), ds); + balance += amount; + begin_cell() + .store_int(active, 1) + .store_coins(balance) + .store_slice(residue) + .end_cell().set_data(); +} + +(slice, int) get_client_data(ds) { + ds = ds.preload_ref().begin_parse(); + return (ds~load_msg_addr(), ds~load_uint(256)); +} + +() recv_internal(int msg_value, cell in_msg_full, slice in_msg_body) impure { + slice cs = in_msg_full.begin_parse(); + int flags = cs~load_uint(4); + + if (flags & 1) { ;; ignore all bounced messages + return (); + } + slice sender_address = cs~load_msg_addr(); + + if (in_msg_body.slice_empty?()) { + return add_to_balance(msg_value); + } + int op = in_msg_body~load_uint(32); + if (op == 0) { + return add_to_balance(msg_value); + } + + int query_id = in_msg_body~load_uint(64); + + if(op == op::offer_storage_contract) { + add_to_balance(msg_value - 2 * fee::receipt_value); + var (client, torrent_hash) = get_client_data(get_data().begin_parse()); + var msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(client) + .store_coins(fee::receipt_value) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::contract_deployed, 32) + .store_uint(query_id, 64) + .store_uint(torrent_hash, 256) + .end_cell(); + send_raw_message(msg, 0); + } + + if (op == op::accept_storage_contract) { + var ds = get_data().begin_parse(); + (int active, int balance, slice provider, slice rest) = + (ds~load_int(1), ds~load_coins(), ds~load_msg_addr(), ds); + throw_unless(error::contract_already_active, ~ active); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider)); + begin_cell() + .store_int(true, 1) + .store_coins(balance) + .store_slice(provider) + .store_slice(rest) + .end_cell().set_data(); + var (client, torrent_hash) = get_client_data(rest); + var msg = begin_cell() + .store_uint(0x18, 
6) + .store_slice(client) + .store_coins(fee::receipt_value) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::storage_contract_confirmed, 32) + .store_uint(cur_lt(), 64) + .store_uint(torrent_hash, 256) + .end_cell(); + send_raw_message(msg, 0); + } + + if (op == op::close_contract) { + var ds = get_data().begin_parse(); + (int active, int balance, slice provider, slice rest) = + (ds~load_int(1), ds~load_coins(), ds~load_msg_addr(), ds); + var (client, torrent_hash) = get_client_data(rest); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider) | equal_slice_bits(sender_address, client)); + var client_msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(client) + .store_coins(balance) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::storage_contract_terminated, 32) + .store_uint(cur_lt(), 64) + .store_uint(torrent_hash, 256) + .end_cell(); + if(~ active) { + return send_raw_message(client_msg, 128 + 32); + } + send_raw_message(client_msg, 64); + var provider_msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(provider) + .store_coins(0) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::storage_contract_terminated, 32) + .store_uint(cur_lt(), 64) + .store_uint(torrent_hash, 256) + .end_cell(); + return send_raw_message(provider_msg, 128 + 32); + } + + if (op == op::withdraw) { + var ds = get_data().begin_parse(); + (int active, int balance, slice provider) = (ds~load_int(1), ds~load_coins(), ds~load_msg_addr()); + throw_unless(error::contract_not_active, active); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider)); + if(balance > 0) { + raw_reserve(balance + fee::storage, 2); + } + var msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(provider) + .store_coins(fee::receipt_value) + .store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1) + .store_uint(op::reward_withdrawal, 32) + .store_uint(query_id, 64) + .end_cell(); + send_raw_message(msg, 128 + 32); + } + + if (op 
== op::proof_storage) { + cell file_dict_proof = in_msg_body~load_ref(); + var ds = get_data().begin_parse(); + var (active, + balance, + provider, + merkle_hash, + file_size, + next_proof, + rate_per_mb_day, + max_span, + last_proof_time, + client_data) = (ds~load_int(1), + ds~load_coins(), + ds~load_msg_addr(), + ds~load_uint(256), + ds~load_uint(64), + ds~load_uint(64), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(32), + ds~load_ref()); + throw_unless(error::contract_not_active, active); + throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider)); + throw_unless(error::wrong_proof, check_proof(merkle_hash, next_proof, file_size, file_dict_proof)); + next_proof = rand(file_size); + int actual_span = min(now() - last_proof_time, max_span); + int bounty = muldiv(file_size * rate_per_mb_day, actual_span, 24 * 60 * 60 * 1024 * 1024); + balance = max(0, balance - bounty); + last_proof_time = now(); + begin_cell() + .store_int(true, 1) + .store_coins(balance) + .store_slice(provider) + .store_uint(merkle_hash, 256) + .store_uint(file_size, 64) + .store_uint(next_proof, 64) + .store_coins(rate_per_mb_day) + .store_uint(max_span, 32) + .store_uint(last_proof_time, 32) + .store_ref(client_data) + .end_cell().set_data(); + + ;; Send remaining balance back + cell msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(sender_address) + .store_uint(0, 4 + 1 + 4 + 4 + 64 + 32 + 1 + 1) + .end_cell(); + send_raw_message(msg, 64 + 2); + } +} + +_ get_storage_contract_data() method_id { + var ds = get_data().begin_parse(); + var (active, + balance, + provider, + merkle_hash, + file_size, + next_proof, + rate_per_mb_day, + max_span, + last_proof_time, + rest) = (ds~load_int(1), + ds~load_coins(), + ds~load_msg_addr(), + ds~load_uint(256), + ds~load_uint(64), + ds~load_uint(64), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(32), + ds); + var (client, torrent_hash) = get_client_data(rest); + return (active, balance, provider, merkle_hash, 
file_size, + next_proof, rate_per_mb_day, max_span, last_proof_time, + client, torrent_hash); +} + +_ get_torrent_hash() method_id { + var (active, balance, provider, merkle_hash, file_size, + next_proof, rate_per_mb_day, max_span, last_proof_time, + client, torrent_hash) = get_storage_contract_data(); + return torrent_hash; +} + +_ is_active() method_id { + return get_data().begin_parse().preload_int(1); +} + +;; next_proof, last_proof_time, max_span +_ get_next_proof_info() method_id { + var (active, balance, provider, merkle_hash, file_size, + next_proof, rate_per_mb_day, max_span, last_proof_time, + client, torrent_hash) = get_storage_contract_data(); + return (next_proof, last_proof_time, max_span); +} \ No newline at end of file diff --git a/storage/storage-daemon/smartcont/storage-contract.fif b/storage/storage-daemon/smartcont/storage-contract.fif new file mode 100644 index 00000000..5dd86983 --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-contract.fif @@ -0,0 +1,421 @@ +"Asm.fif" include +// automatically generated from `../../../crypto/smartcont/stdlib.fc` `storage-contract.fc` incl:`constants.fc` +PROGRAM{ + DECLPROC check_proof + DECLPROC add_to_balance + DECLPROC get_client_data + DECLPROC recv_internal + 86593 DECLMETHOD get_storage_contract_data + 71463 DECLMETHOD get_torrent_hash + 122058 DECLMETHOD is_active + 81490 DECLMETHOD get_next_proof_info + check_proof PROC:<{ + // merkle_hash byte_to_proof file_size file_dict_proof + x{D739} s, // merkle_hash byte_to_proof file_size cs special + NOT // merkle_hash byte_to_proof file_size cs _7 + IFJMP:<{ // merkle_hash byte_to_proof file_size cs + 4 BLKDROP // + FALSE // _8 + }> // merkle_hash byte_to_proof file_size cs + 8 LDU // merkle_hash byte_to_proof file_size _9 cs + SWAP // merkle_hash byte_to_proof file_size cs _9 + 3 NEQINT // merkle_hash byte_to_proof file_size cs _13 + IFJMP:<{ // merkle_hash byte_to_proof file_size cs + 4 BLKDROP // + FALSE // _14 + }> // merkle_hash byte_to_proof 
file_size cs + 256 LDU // merkle_hash byte_to_proof file_size _15 cs + s0 s4 XCHG // cs byte_to_proof file_size _15 merkle_hash + NEQ // cs byte_to_proof file_size _18 + IFJMP:<{ // cs byte_to_proof file_size + 3 BLKDROP // + FALSE // _19 + }> // cs byte_to_proof file_size + s0 s2 XCHG // file_size byte_to_proof cs + LDREF // file_size byte_to_proof _44 _43 + DROP // file_size byte_to_proof file_dict + 0 PUSHINT // file_size byte_to_proof file_dict key_len=0 + WHILE:<{ + 64 PUSHINT // file_size byte_to_proof file_dict key_len _25=64 + OVER // file_size byte_to_proof file_dict key_len _25=64 key_len + LSHIFT // file_size byte_to_proof file_dict key_len _26 + s4 PUSH // file_size byte_to_proof file_dict key_len _26 file_size + LESS // file_size byte_to_proof file_dict key_len _27 + }>DO<{ // file_size byte_to_proof file_dict key_len + INC // file_size byte_to_proof file_dict key_len + }> // file_size byte_to_proof file_dict key_len + s3 POP // key_len byte_to_proof file_dict + SWAP // key_len file_dict byte_to_proof + 6 RSHIFT# // key_len file_dict _33 + s0 s2 XCHG // _33 file_dict key_len + DICTUGET + NULLSWAPIFNOT // _45 _46 + NIP // found? 
+ IFJMP:<{ // + TRUE // _35 + }> // + FALSE // _36 + }> + add_to_balance PROCREF:<{ + // amount + c4 PUSH // amount _2 + CTOS // amount ds + 1 LDI // amount _7 ds + LDGRAMS // amount active balance residue + s0 s3 XCHG // residue active balance amount + ADD // residue active balance + SWAP + NEWC // residue balance active _13 + 1 STI // residue balance _15 + SWAP // residue _15 balance + STGRAMS // residue _16 + SWAP // _16 residue + STSLICER // _17 + ENDC // _18 + c4 POP + }> + get_client_data PROC:<{ + // ds + PLDREF // _1 + CTOS // ds + LDMSGADDR // _3 ds + 256 LDU // _3 _11 _10 + DROP // _3 _5 + }> + recv_internal PROC:<{ + // msg_value in_msg_full in_msg_body + SWAP // msg_value in_msg_body in_msg_full + CTOS // msg_value in_msg_body cs + 4 LDU // msg_value in_msg_body flags cs + SWAP + 1 PUSHINT // msg_value in_msg_body cs flags _9=1 + AND // msg_value in_msg_body cs _10 + IFJMP:<{ // msg_value in_msg_body cs + 3 BLKDROP // + }> // msg_value in_msg_body cs + LDMSGADDR // msg_value in_msg_body _421 _420 + DROP // msg_value in_msg_body sender_address + OVER // msg_value in_msg_body sender_address in_msg_body + SEMPTY // msg_value in_msg_body sender_address _14 + IFJMP:<{ // msg_value in_msg_body sender_address + 2DROP // msg_value + add_to_balance INLINECALLDICT + }> // msg_value in_msg_body sender_address + SWAP // msg_value sender_address in_msg_body + 32 LDU // msg_value sender_address op in_msg_body + OVER // msg_value sender_address op in_msg_body op + 0 EQINT // msg_value sender_address op in_msg_body _21 + IFJMP:<{ // msg_value sender_address op in_msg_body + 3 BLKDROP // msg_value + add_to_balance INLINECALLDICT + }> // msg_value sender_address op in_msg_body + 64 LDU // msg_value sender_address op query_id in_msg_body + s2 PUSH + 276580847 PUSHINT // msg_value sender_address op query_id in_msg_body op _27=276580847 + EQUAL // msg_value sender_address op query_id in_msg_body _28 + IF:<{ // msg_value sender_address op query_id in_msg_body + s0 s4 XCHG + 
40000000 PUSHINT // in_msg_body sender_address op query_id msg_value _31 + SUB // in_msg_body sender_address op query_id _32 + add_to_balance INLINECALLDICT + c4 PUSH // in_msg_body sender_address op query_id _36 + CTOS // in_msg_body sender_address op query_id _37 + get_client_data CALLDICT // in_msg_body sender_address op query_id client torrent_hash + 3212562625 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 + 0 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 + 24 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _42=24 + NEWC // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _42=24 _43 + 6 STU // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _45 + s0 s4 XCHG2 // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _45 client + STSLICER // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _46 + 20000000 PUSHINT // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _46 _47=20000000 + STGRAMS // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _48 + s1 s3 XCHG // in_msg_body sender_address op query_id _40=3212562625 torrent_hash _41=0 _48 + 107 STU // in_msg_body sender_address op query_id _40=3212562625 torrent_hash _62 + s1 s2 XCHG // in_msg_body sender_address op query_id torrent_hash _40=3212562625 _62 + 32 STU // in_msg_body sender_address op query_id torrent_hash _64 + s2 s(-1) PUXC // in_msg_body sender_address op query_id torrent_hash query_id _64 + 64 STU // in_msg_body sender_address op query_id torrent_hash _66 + 256 STU // in_msg_body sender_address op query_id _68 + ENDC // in_msg_body sender_address op query_id msg + 0 PUSHINT // in_msg_body sender_address op query_id msg _70=0 + SENDRAWMSG + }>ELSE<{ + s4 POP // in_msg_body sender_address op query_id + }> + OVER + 2050365064 
PUSHINT // in_msg_body sender_address op query_id op _72=2050365064 + EQUAL // in_msg_body sender_address op query_id _73 + IF:<{ // in_msg_body sender_address op query_id + c4 PUSH // in_msg_body sender_address op query_id _75 + CTOS // in_msg_body sender_address op query_id ds + 1 LDI // in_msg_body sender_address op query_id _81 ds + LDGRAMS // in_msg_body sender_address op query_id _81 _84 ds + LDMSGADDR // in_msg_body sender_address op query_id active balance provider rest + s0 s3 XCHG // in_msg_body sender_address op query_id rest balance provider active + NOT // in_msg_body sender_address op query_id rest balance provider _89 + 1007 THROWIFNOT + s5 s0 PUSH2 // in_msg_body sender_address op query_id rest balance provider sender_address provider + SDEQ // in_msg_body sender_address op query_id rest balance provider _92 + 401 THROWIFNOT + TRUE // in_msg_body sender_address op query_id rest balance provider _94 + NEWC // in_msg_body sender_address op query_id rest balance provider _94 _95 + 1 STI // in_msg_body sender_address op query_id rest balance provider _97 + ROT // in_msg_body sender_address op query_id rest provider _97 balance + STGRAMS // in_msg_body sender_address op query_id rest provider _98 + SWAP // in_msg_body sender_address op query_id rest _98 provider + STSLICER // in_msg_body sender_address op query_id rest _99 + OVER // in_msg_body sender_address op query_id rest _99 rest + STSLICER // in_msg_body sender_address op query_id rest _100 + ENDC // in_msg_body sender_address op query_id rest _101 + c4 POP + get_client_data CALLDICT // in_msg_body sender_address op query_id client torrent_hash + LTIME // in_msg_body sender_address op query_id client torrent_hash _107 + 3570068941 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 + 0 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 + 24 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 
_108=3570068941 _109=0 _110=24 + NEWC // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _110=24 _111 + 6 STU // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _113 + s0 s5 XCHG2 // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _113 client + STSLICER // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _114 + 20000000 PUSHINT // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _114 _115=20000000 + STGRAMS // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _116 + s1 s4 XCHG // in_msg_body sender_address op query_id _108=3570068941 torrent_hash _107 _109=0 _116 + 107 STU // in_msg_body sender_address op query_id _108=3570068941 torrent_hash _107 _130 + s1 s3 XCHG // in_msg_body sender_address op query_id _107 torrent_hash _108=3570068941 _130 + 32 STU // in_msg_body sender_address op query_id _107 torrent_hash _132 + s1 s2 XCHG // in_msg_body sender_address op query_id torrent_hash _107 _132 + 64 STU // in_msg_body sender_address op query_id torrent_hash _134 + 256 STU // in_msg_body sender_address op query_id _136 + ENDC // in_msg_body sender_address op query_id msg + 0 PUSHINT // in_msg_body sender_address op query_id msg _138=0 + SENDRAWMSG + }> // in_msg_body sender_address op query_id + OVER + 2046375914 PUSHINT // in_msg_body sender_address op query_id op _140=2046375914 + EQUAL // in_msg_body sender_address op query_id _141 + IFJMP:<{ // in_msg_body sender_address op query_id + s2 s3 XCHG + 3 BLKDROP // sender_address + c4 PUSH // sender_address _143 + CTOS // sender_address ds + 1 LDI // sender_address _149 ds + LDGRAMS // sender_address _149 _152 ds + LDMSGADDR // sender_address active balance provider rest + get_client_data CALLDICT // sender_address active balance provider client torrent_hash + s5 s2 PUSH2 // sender_address active balance provider client 
torrent_hash sender_address provider + SDEQ // sender_address active balance provider client torrent_hash _160 + s6 s2 XCPU // _160 active balance provider client torrent_hash sender_address client + SDEQ // _160 active balance provider client torrent_hash _161 + s1 s6 XCHG // torrent_hash active balance provider client _160 _161 + OR // torrent_hash active balance provider client _162 + 401 THROWIFNOT + LTIME // torrent_hash active balance provider client _165 + 3055775075 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 + 0 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 _167=0 + 24 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _168=24 + NEWC // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _168=24 _169 + 6 STU // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _171 + s0 s4 XCHG2 // torrent_hash active balance provider _167=0 _165 _166=3055775075 _171 client + STSLICER // torrent_hash active balance provider _167=0 _165 _166=3055775075 _172 + s0 s5 XCHG2 // torrent_hash active _166=3055775075 provider _167=0 _165 _172 balance + STGRAMS // torrent_hash active _166=3055775075 provider _167=0 _165 _173 + s1 s2 XCHG // torrent_hash active _166=3055775075 provider _165 _167=0 _173 + 107 STU // torrent_hash active _166=3055775075 provider _165 _187 + s1 s3 XCHG // torrent_hash active _165 provider _166=3055775075 _187 + 32 STU // torrent_hash active _165 provider _189 + s1 s2 XCHG // torrent_hash active provider _165 _189 + 64 STU // torrent_hash active provider _191 + s3 s(-1) PUXC // torrent_hash active provider torrent_hash _191 + 256 STU // torrent_hash active provider _193 + ENDC // torrent_hash active provider client_msg + s0 s2 XCHG // torrent_hash client_msg provider active + NOT // torrent_hash client_msg provider _195 + IFJMP:<{ // torrent_hash client_msg provider + DROP + NIP // client_msg + 160 PUSHINT // client_msg 
_198 + SENDRAWMSG + }> // torrent_hash client_msg provider + SWAP + 64 PUSHINT // torrent_hash provider client_msg _200=64 + SENDRAWMSG + LTIME // torrent_hash provider _203 + 3055775075 PUSHINT // torrent_hash provider _203 _204=3055775075 + 0 PUSHINT // torrent_hash provider _203 _204=3055775075 _205=0 + 24 PUSHINT // torrent_hash provider _203 _204=3055775075 _205=0 _206=24 + NEWC // torrent_hash provider _203 _204=3055775075 _205=0 _206=24 _207 + 6 STU // torrent_hash provider _203 _204=3055775075 _205=0 _209 + s0 s4 XCHG2 // torrent_hash _205=0 _203 _204=3055775075 _209 provider + STSLICER // torrent_hash _205=0 _203 _204=3055775075 _210 + s3 PUSH // torrent_hash _205=0 _203 _204=3055775075 _210 _211=0 + STGRAMS // torrent_hash _205=0 _203 _204=3055775075 _212 + s1 s3 XCHG // torrent_hash _204=3055775075 _203 _205=0 _212 + 107 STU // torrent_hash _204=3055775075 _203 _226 + s1 s2 XCHG // torrent_hash _203 _204=3055775075 _226 + 32 STU // torrent_hash _203 _228 + 64 STU // torrent_hash _230 + 256 STU // _232 + ENDC // provider_msg + 160 PUSHINT // provider_msg _236 + SENDRAWMSG + }> // in_msg_body sender_address op query_id + OVER + 1189949076 PUSHINT // in_msg_body sender_address op query_id op _238=1189949076 + EQUAL // in_msg_body sender_address op query_id _239 + IF:<{ // in_msg_body sender_address op query_id + c4 PUSH // in_msg_body sender_address op query_id _241 + CTOS // in_msg_body sender_address op query_id ds + 1 LDI // in_msg_body sender_address op query_id _246 ds + LDGRAMS // in_msg_body sender_address op query_id _246 _249 ds + LDMSGADDR // in_msg_body sender_address op query_id _246 _249 _449 _448 + DROP // in_msg_body sender_address op query_id active balance provider + s0 s2 XCHG // in_msg_body sender_address op query_id provider balance active + 1003 THROWIFNOT + s4 s1 PUSH2 // in_msg_body sender_address op query_id provider balance sender_address provider + SDEQ // in_msg_body sender_address op query_id provider balance _256 + 401 
THROWIFNOT + DUP // in_msg_body sender_address op query_id provider balance balance + 0 GTINT // in_msg_body sender_address op query_id provider balance _259 + IF:<{ // in_msg_body sender_address op query_id provider balance + 10000000 PUSHINT // in_msg_body sender_address op query_id provider balance _260=10000000 + ADD // in_msg_body sender_address op query_id provider _261 + 2 PUSHINT // in_msg_body sender_address op query_id provider _261 _262=2 + RAWRESERVE + }>ELSE<{ + DROP // in_msg_body sender_address op query_id provider + }> + 2837163862 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 + 0 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 + 24 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _267=24 + NEWC // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _267=24 _268 + 6 STU // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _270 + s0 s3 XCHG2 // in_msg_body sender_address op query_id _266=0 _265=2837163862 _270 provider + STSLICER // in_msg_body sender_address op query_id _266=0 _265=2837163862 _271 + 20000000 PUSHINT // in_msg_body sender_address op query_id _266=0 _265=2837163862 _271 _272=20000000 + STGRAMS // in_msg_body sender_address op query_id _266=0 _265=2837163862 _273 + s1 s2 XCHG // in_msg_body sender_address op query_id _265=2837163862 _266=0 _273 + 107 STU // in_msg_body sender_address op query_id _265=2837163862 _287 + 32 STU // in_msg_body sender_address op query_id _289 + 64 STU // in_msg_body sender_address op _291 + ENDC // in_msg_body sender_address op msg + 160 PUSHINT // in_msg_body sender_address op msg _295 + SENDRAWMSG + }>ELSE<{ + DROP // in_msg_body sender_address op + }> + 1100832077 PUSHINT // in_msg_body sender_address op _297=1100832077 + EQUAL // in_msg_body sender_address _298 + IF:<{ // in_msg_body sender_address + SWAP // sender_address in_msg_body + LDREF // sender_address _451 _450 
+ DROP // sender_address file_dict_proof + c4 PUSH // sender_address file_dict_proof _303 + CTOS // sender_address file_dict_proof ds + 1 LDI // sender_address file_dict_proof _315 ds + LDGRAMS // sender_address file_dict_proof _315 _318 ds + LDMSGADDR // sender_address file_dict_proof _315 _318 _320 ds + 256 LDU // sender_address file_dict_proof _315 _318 _320 _322 ds + 64 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 ds + 64 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 ds + LDGRAMS // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 ds + 32 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 ds + 32 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 _336 ds + LDREF // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 _336 _471 _470 + DROP // sender_address file_dict_proof active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time client_data + s0 s9 XCHG // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time active + 1003 THROWIFNOT + s10 s6 PUSH2 // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time sender_address provider + SDEQ // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time _344 + 401 THROWIFNOT + s5 s3 s(-1) PUXC2 + s5 s10 PUXC // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day merkle_hash next_proof file_size file_dict_proof + check_proof CALLDICT // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day _347 + 1002 THROWIFNOT + s2 PUSH // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time 
rate_per_mb_day file_size + RAND // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day next_proof + NOW // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day next_proof _351 + s0 s3 XCHG2 // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _351 last_proof_time + SUB // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _352 + s8 PUSH // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _352 max_span + MIN // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span + s3 s1 PUSH2 // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span file_size rate_per_mb_day + MUL // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span _355 + SWAP + 90596966400 PUSHINTX // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _355 actual_span _364 + MULDIV // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day bounty + s0 s6 XCHG + 0 PUSHINT + s0 s7 XCHG // sender_address max_span client_data _366=0 provider merkle_hash file_size next_proof rate_per_mb_day balance bounty + SUB // sender_address max_span client_data _366=0 provider merkle_hash file_size next_proof rate_per_mb_day _367 + s1 s6 XCHG // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof _366=0 _367 + MAX // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance + NOW // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time + TRUE // 
sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _370 + NEWC // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _370 _371 + 1 STI // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _373 + ROT // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof last_proof_time _373 balance + STGRAMS // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof last_proof_time _374 + s0 s5 XCHG2 // sender_address max_span client_data rate_per_mb_day last_proof_time merkle_hash file_size next_proof _374 provider + STSLICER // sender_address max_span client_data rate_per_mb_day last_proof_time merkle_hash file_size next_proof _375 + s1 s3 XCHG // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof file_size merkle_hash _375 + 256 STU // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof file_size _377 + 64 STU // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof _379 + 64 STU // sender_address max_span client_data rate_per_mb_day last_proof_time _381 + ROT // sender_address max_span client_data last_proof_time _381 rate_per_mb_day + STGRAMS // sender_address max_span client_data last_proof_time _382 + s1 s3 XCHG // sender_address last_proof_time client_data max_span _382 + 32 STU // sender_address last_proof_time client_data _384 + s1 s2 XCHG // sender_address client_data last_proof_time _384 + 32 STU // sender_address client_data _386 + STREF // sender_address _387 + ENDC // sender_address _388 + c4 POP + 0 PUSHINT // sender_address _391=0 + 24 PUSHINT // sender_address _391=0 _392=24 + NEWC // sender_address _391=0 _392=24 _393 + 6 STU // sender_address _391=0 _395 + ROT // _391=0 _395 sender_address + STSLICER // 
_391=0 _396 + 111 STU // _412 + ENDC // msg + 66 PUSHINT // msg _416 + SENDRAWMSG + }>ELSE<{ + 2DROP // + }> + }> + get_storage_contract_data PROC:<{ + // + c4 PUSH // _1 + CTOS // ds + 1 LDI // _13 ds + LDGRAMS // _13 _16 ds + LDMSGADDR // _13 _16 _18 ds + 256 LDU // _13 _16 _18 _20 ds + 64 LDU // _13 _16 _18 _20 _23 ds + 64 LDU // _13 _16 _18 _20 _23 _26 ds + LDGRAMS // _13 _16 _18 _20 _23 _26 _29 ds + 32 LDU // _13 _16 _18 _20 _23 _26 _29 _31 ds + 32 LDU // active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time rest + get_client_data CALLDICT // active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time client torrent_hash + }> + get_torrent_hash PROC:<{ + // + get_storage_contract_data CALLDICT // _12 _13 _14 _15 _16 _17 _18 _19 _20 _21 _22 + 10 1 BLKDROP2 // torrent_hash + }> + is_active PROC:<{ + // + c4 PUSH // _0 + CTOS // _1 + 1 PLDI // _3 + }> + get_next_proof_info PROC:<{ + // + get_storage_contract_data CALLDICT // _12 _13 _14 _15 _16 _17 _18 _19 _20 _21 _22 + 2DROP + s2 POP + 5 3 BLKDROP2 // next_proof last_proof_time max_span + }> +}END>c diff --git a/storage/storage-daemon/smartcont/storage-provider-code.boc b/storage/storage-daemon/smartcont/storage-provider-code.boc new file mode 100644 index 00000000..e7cee4ff Binary files /dev/null and b/storage/storage-daemon/smartcont/storage-provider-code.boc differ diff --git a/storage/storage-daemon/smartcont/storage-provider.fc b/storage/storage-daemon/smartcont/storage-provider.fc new file mode 100644 index 00000000..17223833 --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-provider.fc @@ -0,0 +1,227 @@ +;; Storage contract fabric + +#include "constants.fc"; + +const min_deploy_amount = 50000000; + +cell storage_contract_code() asm """ "storage-contract-code.boc" file>B B>boc PUSHREF """; + +slice calculate_address_by_stateinit(cell state_init) { + return begin_cell().store_uint(4, 3) + .store_int(0, 8) + 
.store_uint(cell_hash(state_init), 256) + .end_cell() + .begin_parse(); +} + +cell build_storage_contract_stateinit(int merkle_hash, int file_size, int rate_per_mb_day, + int max_span, slice client, int torrent_hash) { + cell data = begin_cell() + .store_int(0, 1) ;; active + .store_coins(0) ;; client balance + .store_slice(my_address()) + .store_uint(merkle_hash, 256) + .store_uint(file_size, 64) + .store_uint(0, 64) ;; next_proof + .store_coins(rate_per_mb_day) + .store_uint(max_span, 32) + .store_uint(now(), 32) ;; last_proof_time + .store_ref(begin_cell() + .store_slice(client) + .store_uint(torrent_hash, 256) + .end_cell()) + .end_cell(); + + cell state_init = begin_cell() + .store_uint(0, 2) + .store_maybe_ref(storage_contract_code()) + .store_maybe_ref(data) + .store_uint(0, 1) .end_cell(); + return state_init; +} + +() deploy_storage_contract (slice client, int query_id, int file_size, int merkle_hash, int torrent_hash, + int expected_rate, int expected_max_span) impure { + var ds = get_data().begin_parse(); + var (wallet_data, + accept_new_contracts?, + rate_per_mb_day, + max_span, + minimal_file_size, + maximal_file_size) = (ds~load_bits(32 + 32 + 256), + ds~load_int(1), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(64), + ds~load_uint(64)); + throw_unless(error::no_new_contracts, accept_new_contracts?); + throw_unless(error::file_too_small, file_size >= minimal_file_size); + throw_unless(error::file_too_big, file_size <= maximal_file_size); + throw_unless(error::provider_params_changed, expected_rate == rate_per_mb_day); + throw_unless(error::provider_params_changed, expected_max_span == max_span); + cell state_init = build_storage_contract_stateinit(merkle_hash, file_size, rate_per_mb_day, + max_span, client, torrent_hash); + cell msg = begin_cell() + .store_uint(0x18, 6) + .store_slice(calculate_address_by_stateinit(state_init)) + .store_coins(0) + .store_uint(4 + 2, 1 + 4 + 4 + 64 + 32 + 1 + 1 + 1) + .store_ref(state_init) + 
.store_uint(op::offer_storage_contract, 32) + .store_uint(query_id, 64) + .end_cell(); + send_raw_message(msg, 64); +} + +() recv_internal(int msg_value, cell in_msg_full, slice in_msg_body) impure { + slice cs = in_msg_full.begin_parse(); + int flags = cs~load_uint(4); + + if ((flags & 1) | in_msg_body.slice_empty?()) { ;; ignore all bounced and empty messages + return (); + } + slice sender_address = cs~load_msg_addr(); + + int op = in_msg_body~load_uint(32); + if (op == 0) { ;; transfer with text message + return (); + } + int query_id = in_msg_body~load_uint(64); + + if(op == op::offer_storage_contract) { + throw_unless(error::not_enough_money, msg_value >= min_deploy_amount); + ;; torrent_info piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) + ;; microchunk_hash:(Maybe (## 256)) description:Text = TorrentInfo; + ;; + ;; new_storage_contract#00000001 query_id:uint64 info:(^ TorrentInfo) microchunk_hash:uint256 + ;; expected_rate:Coins expected_max_span:uint32 = NewStorageContract; + cell torrent_info = in_msg_body~load_ref(); + int torrent_hash = cell_hash(torrent_info); + slice info_cs = torrent_info.begin_parse(); + info_cs~skip_bits(32); + int file_size = info_cs~load_uint(64); + int merkle_hash = in_msg_body~load_uint(256); + + int expected_rate = in_msg_body~load_coins(); + int expected_max_span = in_msg_body~load_uint(32); + deploy_storage_contract(sender_address, query_id, file_size, merkle_hash, torrent_hash, + expected_rate, expected_max_span); + return (); + } + if(op == op::storage_contract_terminated) { + return (); + } + + if(op == op::update_pubkey) { + if(~ equal_slice_bits(my_address(), sender_address)) { + return (); + } + var ds = get_data().begin_parse(); + var (seqno_subwallet, + _, + non_wallet_data) = (ds~load_bits(32 + 32), + ds~load_uint(256), + ds); + int new_pubkey = in_msg_body~load_uint(256); + set_data(begin_cell() + .store_slice(seqno_subwallet) + .store_uint(new_pubkey, 256) + 
.store_slice(non_wallet_data) + .end_cell()); + } + if(op == op::update_storage_params) { + if(~ equal_slice_bits(my_address(), sender_address)) { + return (); + } + var ds = get_data().begin_parse(); + var wallet_data = ds~load_bits(32 + 32 + 256); + var(accept_new_contracts?, + rate_per_mb_day, + max_span, + minimal_file_size, + maximal_file_size) = (in_msg_body~load_int(1), + in_msg_body~load_coins(), + in_msg_body~load_uint(32), + in_msg_body~load_uint(64), + in_msg_body~load_uint(64)); + set_data(begin_cell() + .store_slice(wallet_data) + .store_int(accept_new_contracts?, 1) + .store_coins(rate_per_mb_day) + .store_uint(max_span, 32) + .store_uint(minimal_file_size, 64) + .store_uint(maximal_file_size, 64) + .end_cell()); + } +} + +() recv_external(slice in_msg) impure { + var signature = in_msg~load_bits(512); + var cs = in_msg; + var (subwallet_id, valid_until, msg_seqno) = (cs~load_uint(32), cs~load_uint(32), cs~load_uint(32)); + throw_if(35, valid_until <= now()); + var ds = get_data().begin_parse(); + var (stored_seqno, + stored_subwallet, + public_key, + non_wallet_data) = (ds~load_uint(32), + ds~load_uint(32), + ds~load_uint(256), + ds); + throw_unless(33, msg_seqno == stored_seqno); + throw_unless(34, subwallet_id == stored_subwallet); + throw_unless(35, check_signature(slice_hash(in_msg), signature, public_key)); + accept_message(); + cs~touch(); + while (cs.slice_refs()) { + var mode = cs~load_uint(8); + send_raw_message(cs~load_ref(), mode); + } + set_data(begin_cell() + .store_uint(stored_seqno + 1, 32) + .store_uint(stored_subwallet, 32) + .store_uint(public_key, 256) + .store_slice(non_wallet_data) + .end_cell()); +} + +;; Get methods + +int seqno() method_id { + return get_data().begin_parse().preload_uint(32); +} + +int get_public_key() method_id { + var cs = get_data().begin_parse(); + cs~load_uint(64); + return cs.preload_uint(256); +} + +;; seqno, subwallet, key +_ get_wallet_params() method_id { + var ds = get_data().begin_parse(); + var 
(stored_seqno, stored_subwallet, public_key) = (ds~load_uint(32), ds~load_uint(32), ds~load_uint(256)); + return (stored_seqno, stored_subwallet, public_key); +} + +_ get_storage_params() method_id { + var ds = get_data().begin_parse(); + var (wallet_data, + accept_new_contracts?, + rate_per_mb_day, + max_span, + minimal_file_size, + maximal_file_size) = (ds~load_bits(32 + 32 + 256), + ds~load_int(1), + ds~load_coins(), + ds~load_uint(32), + ds~load_uint(64), + ds~load_uint(64)); + return (accept_new_contracts?, rate_per_mb_day, max_span, minimal_file_size, maximal_file_size); +} + +slice get_storage_contract_address(int merkle_hash, int file_size, slice client, int torrent_hash) method_id { + var (_, rate_per_mb_day, max_span, _, _) = get_storage_params(); + cell state_init = build_storage_contract_stateinit(merkle_hash, file_size, rate_per_mb_day, max_span, client, torrent_hash); + return calculate_address_by_stateinit(state_init); +} diff --git a/storage/storage-daemon/smartcont/storage-provider.fif b/storage/storage-daemon/smartcont/storage-provider.fif new file mode 100644 index 00000000..28dddc7c --- /dev/null +++ b/storage/storage-daemon/smartcont/storage-provider.fif @@ -0,0 +1,340 @@ +"Asm.fif" include +// automatically generated from `../../../crypto/smartcont/stdlib.fc` `storage-provider.fc` incl:`constants.fc` +PROGRAM{ + DECLPROC calculate_address_by_stateinit + DECLPROC build_storage_contract_stateinit + DECLPROC deploy_storage_contract + DECLPROC recv_internal + DECLPROC recv_external + 85143 DECLMETHOD seqno + 78748 DECLMETHOD get_public_key + 130271 DECLMETHOD get_wallet_params + 104346 DECLMETHOD get_storage_params + 119729 DECLMETHOD get_storage_contract_address + calculate_address_by_stateinit PROC:<{ + // state_init + HASHCU // _1 + 0 PUSHINT // _1 _2=0 + 4 PUSHINT // _1 _2=0 _3=4 + NEWC // _1 _2=0 _3=4 _4 + 3 STU // _1 _2=0 _6 + 8 STI // _1 _8 + 256 STU // _10 + ENDC // _11 + CTOS // _12 + }> + build_storage_contract_stateinit PROC:<{ + // 
merkle_hash file_size rate_per_mb_day max_span client torrent_hash + NEWC + ROT // merkle_hash file_size rate_per_mb_day max_span torrent_hash _7 client + STSLICER // merkle_hash file_size rate_per_mb_day max_span torrent_hash _8 + 256 STU // merkle_hash file_size rate_per_mb_day max_span _10 + ENDC // merkle_hash file_size rate_per_mb_day max_span _11 + NOW // merkle_hash file_size rate_per_mb_day max_span _11 _12 + 0 PUSHINT // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 + DUP // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _14=0 + NEWC // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _14=0 _15 + 1 STI // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _17 + OVER // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _17 _18=0 + STGRAMS // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _19 + MYADDR // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _19 _20 + STSLICER // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _21 + s1 s7 XCHG // _13=0 file_size rate_per_mb_day max_span _11 _12 merkle_hash _21 + 256 STU // _13=0 file_size rate_per_mb_day max_span _11 _12 _23 + s1 s5 XCHG // _13=0 _12 rate_per_mb_day max_span _11 file_size _23 + 64 STU // _13=0 _12 rate_per_mb_day max_span _11 _25 + s1 s5 XCHG // _11 _12 rate_per_mb_day max_span _13=0 _25 + 64 STU // _11 _12 rate_per_mb_day max_span _27 + ROT // _11 _12 max_span _27 rate_per_mb_day + STGRAMS // _11 _12 max_span _28 + 32 STU // _11 _12 _30 + 32 STU // _11 _32 + STREF // _33 + ENDC // data + 0 PUSHINT // data _36=0 + "storage-contract-code.boc" file>B B>boc PUSHREF // data _36=0 _37 + OVER // data _36=0 _37 _38=0 + NEWC // data _36=0 _37 _38=0 _39 + 2 STU // data _36=0 _37 _41 + STOPTREF // data _36=0 _42 + s1 s2 XCHG // _36=0 data _42 + STOPTREF // _36=0 _43 + 1 STU // _45 + ENDC // state_init + }> + deploy_storage_contract PROC:<{ + // client query_id file_size merkle_hash torrent_hash expected_rate 
expected_max_span + c4 PUSH // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _8 + CTOS // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds + 320 PUSHINT // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds _21 + LDSLICEX // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _91 _90 + NIP // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds + 1 LDI // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 ds + LDGRAMS // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 ds + 32 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 ds + 64 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 _31 ds + 64 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 _31 _101 _100 + DROP // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size + s0 s4 XCHG // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span minimal_file_size accept_new_contracts? 
+ 1006 THROWIFNOT + s8 s(-1) PUXC // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span file_size minimal_file_size + GEQ // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span _40 + 1004 THROWIFNOT + s7 s2 PUXC // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span max_span rate_per_mb_day file_size maximal_file_size + LEQ // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span max_span rate_per_mb_day _43 + 1005 THROWIFNOT + s3 s3 XCPU // client query_id file_size merkle_hash torrent_hash rate_per_mb_day expected_max_span max_span expected_rate rate_per_mb_day + EQUAL // client query_id file_size merkle_hash torrent_hash rate_per_mb_day expected_max_span max_span _46 + 1009 THROWIFNOT + TUCK // client query_id file_size merkle_hash torrent_hash rate_per_mb_day max_span expected_max_span max_span + EQUAL // client query_id file_size merkle_hash torrent_hash rate_per_mb_day max_span _49 + 1009 THROWIFNOT + 2SWAP + s1 s5 XCHG + s1 s6 XCHG // query_id merkle_hash file_size rate_per_mb_day max_span client torrent_hash + build_storage_contract_stateinit CALLDICT // query_id state_init + 276580847 PUSHINT // query_id state_init _54=276580847 + 6 PUSHINT // query_id state_init _54=276580847 _57 + 24 PUSHINT // query_id state_init _54=276580847 _57 _58=24 + NEWC // query_id state_init _54=276580847 _57 _58=24 _59 + 6 STU // query_id state_init _54=276580847 _57 _61 + s3 PUSH // query_id state_init _54=276580847 _57 _61 state_init + calculate_address_by_stateinit CALLDICT // query_id state_init _54=276580847 _57 _61 _62 + STSLICER // query_id state_init _54=276580847 _57 _63 + 0 PUSHINT // query_id state_init _54=276580847 _57 _63 _64=0 + STGRAMS // query_id state_init _54=276580847 _57 _65 + 108 STU // query_id state_init _54=276580847 _81 + s1 s2 XCHG // query_id 
_54=276580847 state_init _81 + STREF // query_id _54=276580847 _82 + 32 STU // query_id _84 + 64 STU // _86 + ENDC // msg + 64 PUSHINT // msg _88=64 + SENDRAWMSG + }> + recv_internal PROC:<{ + SAMEALTSAVE // msg_value in_msg_full in_msg_body + SWAP // msg_value in_msg_body in_msg_full + CTOS // msg_value in_msg_body cs + 4 LDU // msg_value in_msg_body flags cs + SWAP + 1 PUSHINT // msg_value in_msg_body cs flags _9=1 + AND // msg_value in_msg_body cs _10 + s2 PUSH // msg_value in_msg_body cs _10 in_msg_body + SEMPTY // msg_value in_msg_body cs _10 _11 + OR // msg_value in_msg_body cs _12 + IFJMP:<{ // msg_value in_msg_body cs + 3 BLKDROP // + }> // msg_value in_msg_body cs + LDMSGADDR // msg_value in_msg_body _141 _140 + DROP // msg_value in_msg_body sender_address + SWAP // msg_value sender_address in_msg_body + 32 LDU // msg_value sender_address op in_msg_body + OVER // msg_value sender_address op in_msg_body op + 0 EQINT // msg_value sender_address op in_msg_body _21 + IFJMP:<{ // msg_value sender_address op in_msg_body + 4 BLKDROP // + }> // msg_value sender_address op in_msg_body + 64 LDU // msg_value sender_address op query_id in_msg_body + s2 PUSH + 276580847 PUSHINT // msg_value sender_address op query_id in_msg_body op _26=276580847 + EQUAL // msg_value sender_address op query_id in_msg_body _27 + IFJMP:<{ // msg_value sender_address op query_id in_msg_body + s2 POP // msg_value sender_address in_msg_body query_id + s0 s3 XCHG + 50000000 PUSHINT // query_id sender_address in_msg_body msg_value _29=50000000 + GEQ // query_id sender_address in_msg_body _30 + 1001 THROWIFNOT + LDREF // query_id sender_address torrent_info in_msg_body + OVER // query_id sender_address torrent_info in_msg_body torrent_info + HASHCU // query_id sender_address torrent_info in_msg_body torrent_hash + s0 s2 XCHG // query_id sender_address torrent_hash in_msg_body torrent_info + CTOS // query_id sender_address torrent_hash in_msg_body info_cs + 32 PUSHINT // query_id sender_address 
torrent_hash in_msg_body info_cs _40=32 + SDSKIPFIRST // query_id sender_address torrent_hash in_msg_body info_cs + 64 LDU // query_id sender_address torrent_hash in_msg_body _149 _148 + DROP // query_id sender_address torrent_hash in_msg_body file_size + SWAP // query_id sender_address torrent_hash file_size in_msg_body + 256 LDU // query_id sender_address torrent_hash file_size merkle_hash in_msg_body + LDGRAMS // query_id sender_address torrent_hash file_size merkle_hash expected_rate in_msg_body + 32 LDU // query_id sender_address torrent_hash file_size merkle_hash expected_rate _155 _154 + DROP // query_id sender_address torrent_hash file_size merkle_hash expected_rate expected_max_span + s5 s6 XCHG + s3 s4 XCHG + s2 s3 XCHG // sender_address query_id file_size merkle_hash torrent_hash expected_rate expected_max_span + deploy_storage_contract CALLDICT + }> // msg_value sender_address op query_id in_msg_body + NIP + s3 POP // in_msg_body sender_address op + DUP + 3055775075 PUSHINT // in_msg_body sender_address op op _58=3055775075 + EQUAL // in_msg_body sender_address op _59 + IFJMP:<{ // in_msg_body sender_address op + 3 BLKDROP // + }> // in_msg_body sender_address op + DUP + 1408453846 PUSHINT // in_msg_body sender_address op op _60=1408453846 + EQUAL // in_msg_body sender_address op _61 + IF:<{ // in_msg_body sender_address op + MYADDR // in_msg_body sender_address op _62 + s2 PUSH // in_msg_body sender_address op _62 sender_address + SDEQ // in_msg_body sender_address op _63 + NOT // in_msg_body sender_address op _64 + IFJMP:<{ // in_msg_body sender_address op + 3 BLKDROP // + RETALT + }> // in_msg_body sender_address op + c4 PUSH // in_msg_body sender_address op _66 + CTOS // in_msg_body sender_address op ds + 64 LDSLICE // in_msg_body sender_address op _71 ds + 256 LDU // in_msg_body sender_address op _71 _159 _158 + NIP // in_msg_body sender_address op seqno_subwallet non_wallet_data + s0 s4 XCHG // non_wallet_data sender_address op seqno_subwallet 
in_msg_body + 256 LDU // non_wallet_data sender_address op seqno_subwallet new_pubkey in_msg_body + NEWC // non_wallet_data sender_address op seqno_subwallet new_pubkey in_msg_body _83 + s0 s3 XCHG2 // non_wallet_data sender_address op in_msg_body new_pubkey _83 seqno_subwallet + STSLICER // non_wallet_data sender_address op in_msg_body new_pubkey _84 + 256 STU // non_wallet_data sender_address op in_msg_body _86 + s0 s4 XCHG2 // in_msg_body sender_address op _86 non_wallet_data + STSLICER // in_msg_body sender_address op _87 + ENDC // in_msg_body sender_address op _88 + c4 POP + }> // in_msg_body sender_address op + 1422651803 PUSHINT // in_msg_body sender_address op _90=1422651803 + EQUAL // in_msg_body sender_address _91 + IF:<{ // in_msg_body sender_address + MYADDR // in_msg_body sender_address _92 + SWAP // in_msg_body _92 sender_address + SDEQ // in_msg_body _93 + NOT // in_msg_body _94 + IFJMP:<{ // in_msg_body + DROP // + RETALT + }> // in_msg_body + c4 PUSH // in_msg_body _96 + CTOS // in_msg_body ds + 320 PUSHINT // in_msg_body ds _104 + LDSLICEX // in_msg_body _163 _162 + DROP // in_msg_body wallet_data + SWAP // wallet_data in_msg_body + 1 LDI // wallet_data _111 in_msg_body + LDGRAMS // wallet_data _111 _114 in_msg_body + 32 LDU // wallet_data _111 _114 _116 in_msg_body + 64 LDU // wallet_data _111 _114 _116 _119 in_msg_body + 64 LDU // wallet_data _111 _114 _116 _119 _173 _172 + DROP // wallet_data accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size + NEWC // wallet_data accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size _125 + s0 s6 XCHG2 // maximal_file_size accept_new_contracts? rate_per_mb_day max_span minimal_file_size _125 wallet_data + STSLICER // maximal_file_size accept_new_contracts? rate_per_mb_day max_span minimal_file_size _126 + s1 s4 XCHG // maximal_file_size minimal_file_size rate_per_mb_day max_span accept_new_contracts? 
_126 + 1 STI // maximal_file_size minimal_file_size rate_per_mb_day max_span _128 + ROT // maximal_file_size minimal_file_size max_span _128 rate_per_mb_day + STGRAMS // maximal_file_size minimal_file_size max_span _129 + 32 STU // maximal_file_size minimal_file_size _131 + 64 STU // maximal_file_size _133 + 64 STU // _135 + ENDC // _136 + c4 POP + }>ELSE<{ + 2DROP // + }> + }> + recv_external PROC:<{ + // in_msg + 9 PUSHPOW2 // in_msg _3=512 + LDSLICEX // signature in_msg + DUP // signature in_msg cs + 32 LDU // signature in_msg _9 cs + 32 LDU // signature in_msg _9 _12 cs + 32 LDU // signature in_msg subwallet_id valid_until msg_seqno cs + s0 s2 XCHG + NOW // signature in_msg subwallet_id cs msg_seqno valid_until _19 + LEQ // signature in_msg subwallet_id cs msg_seqno _20 + 35 THROWIF + c4 PUSH // signature in_msg subwallet_id cs msg_seqno _23 + CTOS // signature in_msg subwallet_id cs msg_seqno ds + 32 LDU // signature in_msg subwallet_id cs msg_seqno _29 ds + 32 LDU // signature in_msg subwallet_id cs msg_seqno _29 _32 ds + 256 LDU // signature in_msg subwallet_id cs msg_seqno stored_seqno stored_subwallet public_key non_wallet_data + s4 s3 XCPU // signature in_msg subwallet_id cs non_wallet_data stored_seqno stored_subwallet public_key msg_seqno stored_seqno + EQUAL // signature in_msg subwallet_id cs non_wallet_data stored_seqno stored_subwallet public_key _39 + 33 THROWIFNOT + s5 s1 XCPU // signature in_msg public_key cs non_wallet_data stored_seqno stored_subwallet subwallet_id stored_subwallet + EQUAL // signature in_msg public_key cs non_wallet_data stored_seqno stored_subwallet _42 + 34 THROWIFNOT + s0 s5 XCHG // signature stored_subwallet public_key cs non_wallet_data stored_seqno in_msg + HASHSU // signature stored_subwallet public_key cs non_wallet_data stored_seqno _45 + s0 s6 s4 XC2PU // stored_seqno stored_subwallet public_key cs non_wallet_data _45 signature public_key + CHKSIGNU // stored_seqno stored_subwallet public_key cs non_wallet_data _46 + 
35 THROWIFNOT + ACCEPT + SWAP // stored_seqno stored_subwallet public_key non_wallet_data cs + WHILE:<{ + DUP // stored_seqno stored_subwallet public_key non_wallet_data cs cs + SREFS // stored_seqno stored_subwallet public_key non_wallet_data cs _51 + }>DO<{ // stored_seqno stored_subwallet public_key non_wallet_data cs + 8 LDU // stored_seqno stored_subwallet public_key non_wallet_data mode cs + LDREF // stored_seqno stored_subwallet public_key non_wallet_data mode _56 cs + s0 s2 XCHG // stored_seqno stored_subwallet public_key non_wallet_data cs _56 mode + SENDRAWMSG + }> // stored_seqno stored_subwallet public_key non_wallet_data cs + DROP // stored_seqno stored_subwallet public_key non_wallet_data + s0 s3 XCHG // non_wallet_data stored_subwallet public_key stored_seqno + INC // non_wallet_data stored_subwallet public_key _60 + NEWC // non_wallet_data stored_subwallet public_key _60 _61 + 32 STU // non_wallet_data stored_subwallet public_key _63 + s1 s2 XCHG // non_wallet_data public_key stored_subwallet _63 + 32 STU // non_wallet_data public_key _65 + 256 STU // non_wallet_data _67 + SWAP // _67 non_wallet_data + STSLICER // _68 + ENDC // _69 + c4 POP + }> + seqno PROC:<{ + // + c4 PUSH // _0 + CTOS // _1 + 32 PLDU // _3 + }> + get_public_key PROC:<{ + // + c4 PUSH // _1 + CTOS // cs + 64 LDU // _9 _8 + NIP // cs + 256 PLDU // _7 + }> + get_wallet_params PROC:<{ + // + c4 PUSH // _1 + CTOS // ds + 32 LDU // _6 ds + 32 LDU // _6 _9 ds + 256 LDU // _6 _9 _20 _19 + DROP // stored_seqno stored_subwallet public_key + }> + get_storage_params PROC:<{ + // + c4 PUSH // _1 + CTOS // ds + 320 PUSHINT // ds _14 + LDSLICEX // _31 _30 + NIP // ds + 1 LDI // _16 ds + LDGRAMS // _16 _19 ds + 32 LDU // _16 _19 _21 ds + 64 LDU // _16 _19 _21 _24 ds + 64 LDU // _16 _19 _21 _24 _41 _40 + DROP // accept_new_contracts? 
rate_per_mb_day max_span minimal_file_size maximal_file_size + }> + get_storage_contract_address PROC:<{ + // merkle_hash file_size client torrent_hash + get_storage_params CALLDICT // merkle_hash file_size client torrent_hash _13 _14 _15 _16 _17 + 2DROP + s2 POP // merkle_hash file_size client torrent_hash max_span rate_per_mb_day + s1 s3 s3 XCHG3 // merkle_hash file_size rate_per_mb_day max_span client torrent_hash + build_storage_contract_stateinit CALLDICT // state_init + calculate_address_by_stateinit CALLDICT // _12 + }> +}END>c diff --git a/storage/storage-daemon/smc-util.cpp b/storage/storage-daemon/smc-util.cpp new file mode 100644 index 00000000..32ba6227 --- /dev/null +++ b/storage/storage-daemon/smc-util.cpp @@ -0,0 +1,487 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ + +#include "smc-util.h" +#include "td/utils/filesystem.h" +#include "keys/encryptor.h" +#include "smartcont/provider-code.h" + +static void smc_forget(td::actor::ActorId client, td::int64 id) { + auto query = create_tl_object(id); + td::actor::send_closure(client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [](td::Result> R) mutable { + if (R.is_error()) { + LOG(WARNING) << "smc_forget failed: " << R.move_as_error(); + } + }); +} + +void run_get_method(ContractAddress address, td::actor::ActorId client, std::string method, + std::vector> args, + td::Promise>> promise) { + LOG(DEBUG) << "Running get method " << method << " on " << address.to_string(); + auto query = + create_tl_object(create_tl_object(address.to_string())); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, method = std::move(method), args = std::move(args), + promise = std::move(promise)](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, obj, std::move(R)); + auto query = create_tl_object( + obj->id_, create_tl_object(std::move(method)), std::move(args)); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, id = obj->id_, + promise = std::move(promise)](td::Result> R) mutable { + smc_forget(client, id); + TRY_RESULT_PROMISE(promise, obj, std::move(R)); + if (obj->exit_code_ != 0 && obj->exit_code_ != 1) { + promise.set_error( + td::Status::Error(PSTRING() << "Method execution finished with code " << obj->exit_code_)); + return; + } + promise.set_result(std::move(obj->stack_)); + }); + }); +} + +void check_contract_exists(ContractAddress address, td::actor::ActorId client, + td::Promise promise) { + auto query = + create_tl_object(create_tl_object(address.to_string())); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, promise = std::move(promise)](td::Result> R) mutable { + 
TRY_RESULT_PROMISE(promise, obj, std::move(R)); + auto query = create_tl_object(obj->id_); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [client, id = obj->id_, + promise = std::move(promise)](td::Result> R) mutable { + smc_forget(client, id); + TRY_RESULT_PROMISE(promise, r, std::move(R)); + promise.set_result(!r->bytes_.empty()); + }); + }); +} + +void get_contract_balance(ContractAddress address, td::actor::ActorId client, + td::Promise promise) { + auto query = + create_tl_object(create_tl_object(address.to_string())); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, std::move(query), + promise.wrap([](tonlib_api::object_ptr r) -> td::Result { + return td::make_refint(r->balance_); + })); +} + +FabricContractWrapper::FabricContractWrapper(ContractAddress address, + td::actor::ActorId client, + td::actor::ActorId keyring, + td::unique_ptr callback, td::uint64 last_processed_lt) + : address_(address) + , client_(std::move(client)) + , keyring_(std::move(keyring)) + , callback_(std::move(callback)) + , last_processed_lt_(last_processed_lt) { +} + +void FabricContractWrapper::start_up() { + alarm(); +} + +void FabricContractWrapper::alarm() { + if (process_transactions_at_ && process_transactions_at_.is_in_past()) { + process_transactions_at_ = td::Timestamp::never(); + load_transactions(); + } + alarm_timestamp().relax(process_transactions_at_); + if (send_message_at_ && send_message_at_.is_in_past()) { + send_message_at_ = td::Timestamp::never(); + do_send_external_message(); + } + alarm_timestamp().relax(send_message_at_); +} + +void FabricContractWrapper::load_transactions() { + LOG(DEBUG) << "Loading transactions for " << address_.to_string() << ", last_lt=" << last_processed_lt_; + auto query = + create_tl_object(create_tl_object(address_.to_string())); + td::actor::send_closure( + client_, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [SelfId = 
actor_id(this)](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, R.move_as_error()); + return; + } + auto obj = R.move_as_ok(); + td::actor::send_closure(SelfId, &FabricContractWrapper::load_last_transactions, + std::vector>(), + std::move(obj->last_transaction_id_), (td::uint32)obj->sync_utime_); + }); +} + +void FabricContractWrapper::load_last_transactions(std::vector> transactions, + tl_object_ptr next_id, + td::uint32 utime) { + if ((td::uint64)next_id->lt_ <= last_processed_lt_) { + loaded_last_transactions(std::make_pair(std::move(transactions), utime)); + return; + } + auto query = create_tl_object( + nullptr, create_tl_object(address_.to_string()), std::move(next_id), 10, false); + td::actor::send_closure( + client_, &tonlib::TonlibClientWrapper::send_request, std::move(query), + [transactions = std::move(transactions), last_processed_lt = last_processed_lt_, SelfId = actor_id(this), + utime](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, R.move_as_error()); + return; + } + auto obj = R.move_as_ok(); + for (auto& transaction : obj->transactions_) { + if ((td::uint64)transaction->transaction_id_->lt_ <= last_processed_lt || + (double)transaction->utime_ < td::Clocks::system() - 86400 || transactions.size() >= 1000) { + LOG(DEBUG) << "Stopping loading transactions (too many or too old)"; + td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, + std::make_pair(std::move(transactions), utime)); + return; + } + LOG(DEBUG) << "Adding trtansaction, lt=" << transaction->transaction_id_->lt_; + transactions.push_back(std::move(transaction)); + } + td::actor::send_closure(SelfId, &FabricContractWrapper::load_last_transactions, std::move(transactions), + std::move(obj->previous_transaction_id_), utime); + }); +} + +void FabricContractWrapper::loaded_last_transactions( + 
td::Result>, td::uint32>> R) { + if (R.is_error()) { + LOG(ERROR) << "Error during loading last transactions: " << R.move_as_error(); + alarm_timestamp().relax(process_transactions_at_ = td::Timestamp::in(30.0)); + return; + } + auto r = R.move_as_ok(); + auto transactions = std::move(r.first); + td::uint32 utime = r.second; + LOG(DEBUG) << "Finished loading " << transactions.size() << " transactions. sync_utime=" << utime; + std::reverse(transactions.begin(), transactions.end()); + for (tl_object_ptr& transaction : transactions) { + LOG(DEBUG) << "Processing transaction tl=" << transaction->transaction_id_->lt_; + last_processed_lt_ = transaction->transaction_id_->lt_; + // transaction->in_msg_->source_->account_address_.empty() - message is external + if (current_ext_message_ && current_ext_message_.value().sent && + transaction->in_msg_->source_->account_address_.empty()) { + auto msg_data = dynamic_cast(transaction->in_msg_->msg_data_.get()); + if (msg_data == nullptr) { + continue; + } + auto r_body = vm::std_boc_deserialize(msg_data->body_); + if (r_body.is_error()) { + LOG(WARNING) << "Invalid response from tonlib: " << r_body.move_as_error(); + continue; + } + td::Ref body = r_body.move_as_ok(); + vm::CellSlice cs(vm::NoVm(), body); + if (cs.size() < 512 + 96) { + continue; + } + cs.skip_first(512 + 64); + auto seqno = (td::uint32)cs.fetch_ulong(32); + if (seqno != current_ext_message_.value().seqno) { + continue; + } + if (current_ext_message_.value().ext_msg_body_hash != body->get_hash().bits()) { + do_send_external_message_finish(td::Status::Error("Another external message with the same seqno was accepted")); + continue; + } + do_send_external_message_finish(&transaction->out_msgs_); + } + } + for (tl_object_ptr& transaction : transactions) { + callback_->on_transaction(std::move(transaction)); + } + if (current_ext_message_ && current_ext_message_.value().sent && current_ext_message_.value().timeout < utime) { + 
do_send_external_message_finish(td::Status::Error("Timeout")); + } + alarm_timestamp().relax(process_transactions_at_ = td::Timestamp::in(10.0)); +} + +void FabricContractWrapper::run_get_method( + std::string method, std::vector> args, + td::Promise>> promise) { + ::run_get_method(address_, client_, std::move(method), std::move(args), std::move(promise)); +} + +void FabricContractWrapper::send_internal_message(ContractAddress dest, td::RefInt256 coins, vm::CellSlice body, + td::Promise promise) { + td::Bits256 body_hash = vm::CellBuilder().append_cellslice(body).finalize_novm()->get_hash().bits(); + LOG(DEBUG) << "send_internal_message " << address_.to_string() << " -> " << dest.to_string() << ", " << coins + << " nanoTON, body=" << body_hash.to_hex(); + CHECK(coins->sgn() >= 0); + pending_messages_.push(PendingMessage{dest, std::move(coins), std::move(body), body_hash, std::move(promise)}); + if (!send_message_at_ && !current_ext_message_) { + alarm_timestamp().relax(send_message_at_ = td::Timestamp::in(1.0)); + } +} + +void FabricContractWrapper::do_send_external_message() { + CHECK(!current_ext_message_); + LOG(DEBUG) << "do_send_external message: " << pending_messages_.size() << " messages in queue"; + if (pending_messages_.empty()) { + return; + } + current_ext_message_ = CurrentExtMessage(); + while (current_ext_message_.value().int_msgs.size() < 4 && !pending_messages_.empty()) { + PendingMessage msg = std::move(pending_messages_.front()); + current_ext_message_.value().int_msgs.push_back(std::move(msg)); + pending_messages_.pop(); + } + run_get_method( + "get_wallet_params", {}, + [SelfId = actor_id(this)](td::Result>> R) { + td::uint32 seqno = 0; + td::uint32 subwallet_id = 0; + td::Bits256 public_key = td::Bits256::zero(); + auto S = [&]() -> td::Status { + TRY_RESULT(stack, std::move(R)); + if (stack.size() != 3) { + return td::Status::Error(PSTRING() << "Method returned " << stack.size() << " values, 3 expected"); + } + TRY_RESULT_PREFIX_ASSIGN(seqno, 
entry_to_int(stack[0]), "Invalid seqno: "); + TRY_RESULT_PREFIX_ASSIGN(subwallet_id, entry_to_int(stack[1]), "Invalid subwallet_id: "); + TRY_RESULT_PREFIX_ASSIGN(public_key, entry_to_bits256(stack[2]), "Invalid public_key: "); + return td::Status::OK(); + }(); + if (S.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish, + S.move_as_error_prefix("Failed to get wallet params: ")); + return; + } + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_cont, seqno, subwallet_id, + public_key); + }); +} + +void FabricContractWrapper::do_send_external_message_cont(td::uint32 seqno, td::uint32 subwallet_id, + td::Bits256 public_key) { + LOG(DEBUG) << "Got wallet params: seqno=" << seqno << ", subwallet_id=" << subwallet_id + << ", key=" << public_key.to_hex(); + CHECK(current_ext_message_); + current_ext_message_.value().seqno = seqno; + current_ext_message_.value().timeout = (td::uint32)td::Clocks::system() + 45; + vm::CellBuilder b; + b.store_long(subwallet_id, 32); // subwallet id. 
+ b.store_long(current_ext_message_.value().timeout, 32); // valid until + b.store_long(seqno, 32); // seqno + for (const PendingMessage& msg : current_ext_message_.value().int_msgs) { + vm::CellBuilder b2; + b2.store_long(3 << 2, 6); // 0 ihr_disabled:Bool bounce:Bool bounced:Bool src:MsgAddressInt + b2.append_cellslice(msg.dest.to_cellslice()); // dest:MsgAddressInt + store_coins(b2, msg.value); // grams:Grams + b2.store_zeroes(1 + 4 + 4 + 64 + 32 + 1); // extre, ihr_fee, fwd_fee, created_lt, created_at, init + // body:(Either X ^X) + if (b2.remaining_bits() >= 1 + msg.body.size() && b2.remaining_refs() >= msg.body.size_refs()) { + b2.store_zeroes(1); + b2.append_cellslice(msg.body); + } else { + b2.store_ones(1); + b2.store_ref(vm::CellBuilder().append_cellslice(msg.body).finalize_novm()); + } + b.store_long(3, 8); // mode + b.store_ref(b2.finalize_novm()); // message + } + td::Ref to_sign = b.finalize_novm(); + td::BufferSlice hash(to_sign->get_hash().as_slice()); + LOG(DEBUG) << "Signing external message"; + td::actor::send_closure( + keyring_, &keyring::Keyring::sign_message, PublicKey(pubkeys::Ed25519(public_key)).compute_short_id(), + std::move(hash), [SelfId = actor_id(this), data = std::move(to_sign)](td::Result R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish, + R.move_as_error_prefix("Failed to sign message: ")); + return; + } + auto signature = R.move_as_ok(); + CHECK(signature.size() == 64); + vm::CellBuilder b; + b.store_bytes(signature); + b.append_cellslice(vm::load_cell_slice(data)); + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_cont2, b.finalize_novm()); + }); +} + +void FabricContractWrapper::do_send_external_message_cont2(td::Ref ext_msg_body) { + CHECK(current_ext_message_); + LOG(DEBUG) << "Signed external message, sending: seqno=" << current_ext_message_.value().seqno; + current_ext_message_.value().sent = true; + 
current_ext_message_.value().ext_msg_body_hash = ext_msg_body->get_hash().bits(); + auto body = vm::std_boc_serialize(ext_msg_body).move_as_ok().as_slice().str(); + auto query = create_tl_object( + create_tl_object(address_.to_string()), "", std::move(body)); + td::actor::send_closure(client_, &tonlib::TonlibClientWrapper::send_request, + std::move(query), [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish, + R.move_as_error_prefix("Failed to send message: ")); + } else { + LOG(DEBUG) << "External message was sent to liteserver"; + } + }); +} + +void FabricContractWrapper::do_send_external_message_finish( + td::Result>*> R) { + CHECK(current_ext_message_); + if (R.is_error()) { + LOG(DEBUG) << "Failed to send external message seqno=" << current_ext_message_.value().seqno << ": " << R.error(); + for (auto& msg : current_ext_message_.value().int_msgs) { + msg.promise.set_error(R.error().clone()); + } + } else { + LOG(DEBUG) << "External message seqno=" << current_ext_message_.value().seqno << " was sent"; + const auto& out_msgs = *R.ok(); + auto& msgs = current_ext_message_.value().int_msgs; + for (const auto& out_msg : out_msgs) { + ContractAddress dest = ContractAddress::parse(out_msg->destination_->account_address_).move_as_ok(); + td::RefInt256 value = td::make_refint((td::uint64)out_msg->value_); + td::Bits256 body_hash; + body_hash.as_slice().copy_from(out_msg->body_hash_); + bool found = false; + for (size_t i = 0; i < msgs.size(); ++i) { + if (msgs[i].dest == dest && msgs[i].value->cmp(*value) == 0 && msgs[i].body_hash == body_hash) { + LOG(DEBUG) << "Internal message was sent dest=" << dest.to_string() << ", value=" << value + << ", body_hash=" << body_hash.to_hex(); + msgs[i].promise.set_result(td::Unit()); + msgs.erase(msgs.begin() + i); + found = true; + break; + } + } + if (!found) { + LOG(DEBUG) << "Unexpected internal message was sent: dest=" << 
dest.to_string() << " value=" << value + << " body_hash=" << body_hash; + } + } + for (auto& msg : msgs) { + LOG(DEBUG) << "Internal message WAS NOT SENT dest=" << msg.dest.to_string() << ", value=" << msg.value + << ", body_hash=" << msg.body_hash.to_hex(); + msg.promise.set_result(td::Status::Error("External message was accepted, but internal message was not sent")); + } + } + current_ext_message_ = {}; + if (!pending_messages_.empty()) { + do_send_external_message(); + } +} + +bool store_coins(vm::CellBuilder& b, const td::RefInt256& x) { + unsigned len = (((unsigned)x->bit_size(false) + 7) >> 3); + if (len >= 16) { + return false; + } + return b.store_long_bool(len, 4) && b.store_int256_bool(*x, len * 8, false); +} + +bool store_coins(vm::CellBuilder& b, td::uint64 x) { + return store_coins(b, td::make_refint(x)); +} + +td::Result generate_fabric_contract(td::actor::ActorId keyring) { + auto private_key = PrivateKey{privkeys::Ed25519::random()}; + td::Bits256 public_key = private_key.compute_public_key().ed25519_value().raw(); + + td::Slice code_boc(STORAGE_PROVIDER_CODE, sizeof(STORAGE_PROVIDER_CODE)); + TRY_RESULT(code, vm::std_boc_deserialize(code_boc)); + + LOG(DEBUG) << "Generating storage provider state init. 
code_hash=" << code->get_hash().to_hex() + << " public_key=" << public_key.to_hex(); + + vm::CellBuilder b; + b.store_long(0, 32); // seqno + b.store_long(0, 32); // subwallet_id + b.store_bytes(public_key.as_slice()); // public_key + b.store_long(0, 1); // accept_new_contracts (false by default) + store_coins(b, 1'000'000); // rate_per_mb_day + b.store_long(86400, 32); // max_span + b.store_long(1 << 20, 64); // min_file_size + b.store_long(1 << 30, 64); // max_file_size + td::Ref data = b.finalize_novm(); + + // _ split_depth:(Maybe (## 5)) special:(Maybe TickTock) + // code:(Maybe ^Cell) data:(Maybe ^Cell) + // library:(HashmapE 256 SimpleLib) = StateInit; + td::Ref state_init = + vm::CellBuilder().store_long(0b00110, 5).store_ref(std::move(code)).store_ref(std::move(data)).finalize_novm(); + ContractAddress address{basechainId, state_init->get_hash().bits()}; + + // Message body + b = vm::CellBuilder(); + b.store_long(0, 32); // subwallet_id + b.store_long((td::uint32)td::Clocks::system() + 3600 * 24 * 7, 32); // valid_until + b.store_long(0, 32); // seqno + td::Ref to_sign = b.finalize_novm(); + TRY_RESULT(decryptor, private_key.create_decryptor()); + TRY_RESULT(signature, decryptor->sign(to_sign->get_hash().as_slice())); + CHECK(signature.size() == 64); + td::Ref msg_body = + vm::CellBuilder().store_bytes(signature).append_cellslice(vm::CellSlice(vm::NoVm(), to_sign)).finalize_novm(); + + td::actor::send_closure(keyring, &keyring::Keyring::add_key, private_key, false, + [](td::Result R) { R.ensure(); }); + return FabricContractInit{address, state_init, msg_body}; +} + +td::Ref create_new_contract_message_body(td::Ref info, td::Bits256 microchunk_hash, + td::uint64 query_id, td::RefInt256 rate, td::uint32 max_span) { + // new_storage_contract#00000001 query_id:uint64 info:(^ TorrentInfo) microchunk_hash:uint256 + // expected_rate:Coins expected_max_span:uint32 = NewStorageContract; + vm::CellBuilder b; + b.store_long(0x107c49ef, 32); // const 
op::offer_storage_contract = 0x107c49ef; + b.store_long(query_id, 64); + b.store_ref(std::move(info)); + b.store_bytes(microchunk_hash.as_slice()); + store_coins(b, rate); + b.store_long(max_span, 32); + return b.finalize_novm(); +} + +void get_storage_contract_data(ContractAddress address, td::actor::ActorId client, + td::Promise promise) { + run_get_method( + address, client, "get_storage_contract_data", {}, + promise.wrap([](std::vector> stack) -> td::Result { + if (stack.size() < 11) { + return td::Status::Error("Too few entries"); + } + // active, balance, provider, merkle_hash, file_size, next_proof, rate_per_mb_day, max_span, last_proof_time, + // client, torrent_hash + TRY_RESULT(active, entry_to_int(stack[0])); + TRY_RESULT(balance, entry_to_int(stack[1])); + TRY_RESULT(microchunk_hash, entry_to_bits256(stack[3])); + TRY_RESULT(file_size, entry_to_int(stack[4])); + TRY_RESULT(next_proof, entry_to_int(stack[5])); + TRY_RESULT(rate_per_mb_day, entry_to_int(stack[6])); + TRY_RESULT(max_span, entry_to_int(stack[7])); + TRY_RESULT(last_proof_time, entry_to_int(stack[8])); + TRY_RESULT(torrent_hash, entry_to_bits256(stack[10])); + return StorageContractData{(bool)active, balance, microchunk_hash, file_size, next_proof, + rate_per_mb_day, max_span, last_proof_time, torrent_hash}; + })); +} diff --git a/storage/storage-daemon/smc-util.h b/storage/storage-daemon/smc-util.h new file mode 100644 index 00000000..0b921d4d --- /dev/null +++ b/storage/storage-daemon/smc-util.h @@ -0,0 +1,184 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. 
+ + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ + +#pragma once +#include "ton/ton-types.h" +#include "crypto/vm/cellslice.h" +#include "block/block-parse.h" +#include "td/actor/actor.h" +#include "tonlib/tonlib/TonlibClientWrapper.h" +#include +#include "keyring/keyring.h" + +using namespace ton; + +struct ContractAddress { + WorkchainId wc = workchainIdNotYet; + td::Bits256 addr = td::Bits256::zero(); + + ContractAddress() = default; + ContractAddress(WorkchainId wc, td::Bits256 addr) : wc(wc), addr(addr) { + } + + std::string to_string() const { + return PSTRING() << wc << ":" << addr.to_hex(); + } + td::Ref to_cellslice() const { + return block::tlb::t_MsgAddressInt.pack_std_address(wc, addr); + } + + static td::Result parse(td::Slice s) { + TRY_RESULT(x, block::StdAddress::parse(s)); + return ContractAddress(x.workchain, x.addr); + } + + bool operator==(const ContractAddress& other) const { + return wc == other.wc && addr == other.addr; + } + bool operator!=(const ContractAddress& other) const { + return !(*this == other); + } + bool operator<(const ContractAddress& other) const { + return wc == other.wc ? 
addr < other.addr : wc < other.wc; + } +}; + +void run_get_method(ContractAddress address, td::actor::ActorId client, std::string method, + std::vector> args, + td::Promise>> promise); +void check_contract_exists(ContractAddress address, td::actor::ActorId client, + td::Promise promise); +void get_contract_balance(ContractAddress address, td::actor::ActorId client, + td::Promise promise); + +class FabricContractWrapper : public td::actor::Actor { + public: + class Callback { + public: + virtual ~Callback() = default; + virtual void on_transaction(tl_object_ptr transaction) = 0; + }; + + explicit FabricContractWrapper(ContractAddress address, td::actor::ActorId client, + td::actor::ActorId keyring, td::unique_ptr callback, + td::uint64 last_processed_lt); + + void start_up() override; + void alarm() override; + + void run_get_method(std::string method, std::vector> args, + td::Promise>> promise); + void send_internal_message(ContractAddress dest, td::RefInt256 coins, vm::CellSlice body, + td::Promise promise); + + private: + ContractAddress address_; + td::actor::ActorId client_; + td::actor::ActorId keyring_; + td::unique_ptr callback_; + + td::Timestamp process_transactions_at_ = td::Timestamp::now(); + td::uint64 last_processed_lt_ = 0; + + struct PendingMessage { + ContractAddress dest; + td::RefInt256 value; + vm::CellSlice body; + td::Bits256 body_hash; + td::Promise promise; + }; + struct CurrentExtMessage { + std::vector int_msgs; + td::uint32 seqno = 0; + bool sent = false; + td::Bits256 ext_msg_body_hash = td::Bits256::zero(); + td::uint32 timeout = 0; + }; + std::queue pending_messages_; + td::Timestamp send_message_at_ = td::Timestamp::never(); + td::optional current_ext_message_; + + void load_transactions(); + void load_last_transactions(std::vector> transactions, + tl_object_ptr next_id, td::uint32 utime); + void loaded_last_transactions( + td::Result>, td::uint32>> R); + + void do_send_external_message(); + void 
do_send_external_message_cont(td::uint32 seqno, td::uint32 subwallet_id, td::Bits256 public_key); + void do_send_external_message_cont2(td::Ref ext_msg_body); + void do_send_external_message_finish(td::Result>*> R); +}; + +template +inline td::Result entry_to_int(const tl_object_ptr& entry) { + auto num = dynamic_cast(entry.get()); + if (num == nullptr) { + return td::Status::Error("Unexpected value type"); + } + return td::to_integer_safe(num->number_->number_); +} + +template <> +inline td::Result entry_to_int(const tl_object_ptr& entry) { + auto num = dynamic_cast(entry.get()); + if (num == nullptr) { + return td::Status::Error("Unexpected value type"); + } + auto x = td::dec_string_to_int256(num->number_->number_); + if (x.is_null()) { + return td::Status::Error("Invalid integer value"); + } + return x; +} + +inline td::Result entry_to_bits256(const tl_object_ptr& entry) { + TRY_RESULT(x, entry_to_int(entry)); + td::Bits256 bits; + if (!x->export_bytes(bits.data(), 32, false)) { + return td::Status::Error("Invalid int256"); + } + return bits; +} + +bool store_coins(vm::CellBuilder& b, const td::RefInt256& x); +bool store_coins(vm::CellBuilder& b, td::uint64 x); + +struct FabricContractInit { + ContractAddress address; + td::Ref state_init; + td::Ref msg_body; +}; +td::Result generate_fabric_contract(td::actor::ActorId keyring); + +td::Ref create_new_contract_message_body(td::Ref info, td::Bits256 microchunk_hash, + td::uint64 query_id, td::RefInt256 rate, td::uint32 max_span); + +struct StorageContractData { + bool active; + td::RefInt256 balance; + td::Bits256 microchunk_hash; + td::uint64 file_size; + td::uint64 next_proof; + td::RefInt256 rate_per_mb_day; + td::uint32 max_span; + td::uint32 last_proof_time; + td::Bits256 torrent_hash; +}; + +void get_storage_contract_data(ContractAddress address, td::actor::ActorId client, + td::Promise promise); \ No newline at end of file diff --git a/storage/storage-daemon/storage-daemon-cli.cpp 
b/storage/storage-daemon/storage-daemon-cli.cpp new file mode 100644 index 00000000..6fe14286 --- /dev/null +++ b/storage/storage-daemon/storage-daemon-cli.cpp @@ -0,0 +1,1747 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ +#include "common/bitstring.h" +#include "keys/encryptor.h" +#include "adnl/adnl-ext-client.h" + +#include "td/utils/port/signals.h" +#include "td/utils/Parser.h" +#include "td/utils/OptionParser.h" +#include "td/utils/PathView.h" +#include "td/utils/misc.h" +#include "td/utils/filesystem.h" +#include "td/utils/port/path.h" + +#include "td/actor/MultiPromise.h" +#include "terminal/terminal.h" + +#include "auto/tl/ton_api_json.h" + +#include +#include +#include +#include +#include "git.h" +#include "common/refint.h" +#include "crypto/block/block.h" + +using namespace ton; + +bool is_whitespace(char c) { + return strchr(" \t\n\r", c) != nullptr; +}; + +td::Result> tokenize(td::Slice s) { + const char* ptr = s.begin(); + auto skip_ws = [&]() { + while (ptr != s.end() && is_whitespace(*ptr)) { + ++ptr; + } + }; + std::vector tokens; + while (true) { + skip_ws(); + if (ptr == s.end()) { + break; + } + char quote = '\0'; + if (*ptr == '"' || *ptr == '\'') { + quote = *ptr; + ++ptr; + } + std::string token; + while (true) { + if (ptr == s.end()) { + if (quote) { + return td::Status::Error("Unmatched 
quote"); + } + break; + } else if (*ptr == '\\') { + ++ptr; + if (ptr == s.end()) { + return td::Status::Error("Backslash at the end of the line"); + } + switch (*ptr) { + case 'n': + token += '\n'; + break; + case 't': + token += '\t'; + break; + case 'r': + token += '\r'; + break; + default: + token += *ptr; + } + ++ptr; + } else if (*ptr == quote || (!quote && is_whitespace(*ptr))) { + ++ptr; + break; + } else { + token += *ptr++; + } + } + tokens.push_back(token); + } + return tokens; +} + +std::string size_to_str(td::uint64 size) { + td::StringBuilder s; + s << td::format::as_size(size); + return s.as_cslice().str(); +} + +std::string time_to_str(td::uint32 time) { + char time_buffer[80]; + time_t rawtime = time; + struct tm tInfo; +#if defined(_WIN32) || defined(_WIN64) + struct tm* timeinfo = localtime_s(&tInfo, &rawtime) ? nullptr : &tInfo; +#else + struct tm* timeinfo = localtime_r(&rawtime, &tInfo); +#endif + assert(timeinfo == &tInfo); + strftime(time_buffer, 80, "%c", timeinfo); + return time_buffer; +} + +std::string coins_to_str(const td::RefInt256& x) { + if (x.is_null() || x->sgn() < 0) { + return "???"; + } + char buf[12]; + snprintf(buf, sizeof(buf), ".%09llu", (x % td::make_refint(1'000'000'000))->to_long()); + return (x / td::make_refint(1'000'000'000))->to_dec_string() + buf; +} + +std::string coins_to_str(const std::string& s) { + return coins_to_str(td::string_to_int256(s)); +} + +template +void print_json(const tl_object_ptr& obj) { + std::string s = td::json_encode(td::ToJson(*obj), true); + td::TerminalIO::out() << s << "\n"; +} + +void print_table(const std::vector>& table, std::set left_cols = {}) { + if (table.empty()) { + return; + } + size_t cols = table[0].size(); + std::vector col_size(cols, 0); + for (const auto& row : table) { + CHECK(row.size() == cols); + for (size_t i = 0; i < cols; ++i) { + col_size[i] = std::max(col_size[i], row[i].size()); + } + } + for (const auto& row : table) { + std::string row_str; + for (size_t i = 0; 
i < cols; ++i) { + if (i != 0) { + row_str += " "; + } + size_t pad = col_size[i] - row[i].size(); + if (!left_cols.count(i)) { + while (pad--) { + row_str += ' '; + } + } + row_str += row[i]; + if (left_cols.count(i)) { + while (pad--) { + row_str += ' '; + } + } + } + td::TerminalIO::out() << row_str << "\n"; + } +} + +struct OptionalProviderParams { + td::optional accept_new_contracts; + td::optional rate_per_mb_day; + td::optional max_span; + td::optional minimal_file_size; + td::optional maximal_file_size; +}; + +struct OptionalProviderConfig { + td::optional max_contracts; + td::optional max_total_size; +}; + +class StorageDaemonCli : public td::actor::Actor { + public: + explicit StorageDaemonCli(td::IPAddress server_ip, PrivateKey client_private_key, PublicKey server_public_key, + std::vector commands) + : server_ip_(server_ip) + , client_private_key_(client_private_key) + , server_public_key_(server_public_key) + , commands_(std::move(commands)) + , batch_mode_(!commands_.empty()) { + } + + void start_up() override { + class ExtClientCallback : public adnl::AdnlExtClient::Callback { + public: + explicit ExtClientCallback(td::actor::ActorId id) : id_(id) { + } + void on_ready() override { + LOG(INFO) << "Connected"; + td::actor::send_closure(id_, &StorageDaemonCli::on_conn_status, true); + } + void on_stop_ready() override { + LOG(WARNING) << "Connection closed"; + td::actor::send_closure(id_, &StorageDaemonCli::on_conn_status, false); + } + + private: + td::actor::ActorId id_; + }; + CHECK(server_ip_.is_valid()); + client_ = adnl::AdnlExtClient::create(adnl::AdnlNodeIdFull{server_public_key_}, client_private_key_, server_ip_, + std::make_unique(actor_id(this))); + + if (!batch_mode_) { + class TerminalCallback : public td::TerminalIO::Callback { + public: + void line_cb(td::BufferSlice line) override { + td::actor::send_closure(id_, &StorageDaemonCli::parse_line, std::move(line)); + } + TerminalCallback(td::actor::ActorId id) : id_(std::move(id)) { + } + + 
  // Connection state callback from ExtClientCallback. In batch mode the first
  // successful connect starts executing the queued commands; `status` itself
  // is not inspected, and a reconnect does not restart the batch (guarded by
  // batch_started_).
  void on_conn_status(bool status) {
    if (batch_mode_ && !batch_started_) {
      batch_started_ = true;
      parse_line(td::BufferSlice(commands_[cur_command_++]));
    }
  }

  // Parses and executes one command line; on parse/dispatch error the command
  // is finished immediately with that error (successful commands report
  // completion asynchronously from their handlers).
  void parse_line(td::BufferSlice line) {
    td::Status S = parse_line_impl(std::move(line));
    if (S.is_error()) {
      command_finished(std::move(S));
    }
  }
bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "-d") { + ++i; + if (i == tokens.size()) { + return td::Status::Error("Unexpected EOLN"); + } + description = tokens[i]; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_path) { + return td::Status::Error("Unexpected token"); + } + path = tokens[i]; + found_path = true; + } + if (!found_path) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_create(std::move(path), std::move(description), json); + } else if (tokens[0] == "add-by-hash" || tokens[0] == "add-by-meta") { + td::optional param; + std::string root_dir; + bool paused = false; + bool json = false; + td::optional> partial; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "-d") { + ++i; + if (i == tokens.size()) { + return td::Status::Error("Unexpected EOLN"); + } + root_dir = tokens[i]; + continue; + } + if (tokens[i] == "--paused") { + paused = true; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + if (tokens[i] == "--partial") { + partial = std::vector(tokens.begin() + i + 1, tokens.end()); + break; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (param) { + return td::Status::Error("Unexpected token"); + } + param = tokens[i]; + } + if (!param) { + return td::Status::Error("Unexpected EOLN"); + } + if (tokens[0] == "add-by-hash") { + TRY_RESULT(hash, parse_hash(param.value())); + return execute_add_by_hash(hash, std::move(root_dir), paused, std::move(partial), json); + } else { + return execute_add_by_meta(param.value(), std::move(root_dir), paused, std::move(partial), json); + } + } else if (tokens[0] == "list") { + bool with_hashes = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); 
++i) { + if (tokens[i] == "--hashes") { + with_hashes = true; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unexpected argument " << tokens[i]); + } + return execute_list(with_hashes, json); + } else if (tokens[0] == "get") { + td::Bits256 hash; + bool found_hash = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_get(hash, json); + } else if (tokens[0] == "get-meta") { + if (tokens.size() != 3) { + return td::Status::Error("Expected bag and file"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + return execute_get_meta(hash, tokens[2]); + } else if (tokens[0] == "get-peers") { + td::Bits256 hash; + bool found_hash = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_get_peers(hash, json); + } else if (tokens[0] == "download-pause" || tokens[0] == "download-resume") { + if (tokens.size() != 2) { + return td::Status::Error("Expected bag"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + return execute_set_active_download(hash, tokens[0] == "download-resume"); + } else if (tokens[0] == "priority-all") 
{ + if (tokens.size() != 3) { + return td::Status::Error("Expected bag and priority"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + TRY_RESULT_PREFIX(priority, td::to_integer_safe(tokens[2]), "Invalid priority: "); + return execute_set_priority_all(hash, priority); + } else if (tokens[0] == "priority-idx") { + if (tokens.size() != 4) { + return td::Status::Error("Expected bag, idx and priority"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + TRY_RESULT_PREFIX(idx, td::to_integer_safe(tokens[2]), "Invalid idx: "); + TRY_RESULT_PREFIX(priority, td::to_integer_safe(tokens[3]), "Invalid priority: "); + return execute_set_priority_idx(hash, idx, priority); + } else if (tokens[0] == "priority-name") { + if (tokens.size() != 4) { + return td::Status::Error("Expected bag, name and priority"); + } + TRY_RESULT(hash, parse_torrent(tokens[1])); + TRY_RESULT_PREFIX(priority, td::to_integer_safe(tokens[3]), "Invalid priority: "); + return execute_set_priority_name(hash, tokens[2], priority); + } else if (tokens[0] == "remove") { + td::Bits256 hash; + bool found_hash = false; + bool remove_files = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--remove-files") { + remove_files = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_remove(hash, remove_files); + } else if (tokens[0] == "load-from") { + td::Bits256 hash; + std::string meta, path; + bool found_hash = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--meta") { + ++i; + meta = tokens[i]; + continue; + } + if (tokens[i] == "--files") { + ++i; + path = tokens[i]; + continue; + } 
+ return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (found_hash) { + return td::Status::Error("Unexpected token"); + } + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + found_hash = true; + } + if (!found_hash) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_load_from(hash, std::move(meta), std::move(path)); + } else if (tokens[0] == "new-contract-message") { + td::Bits256 hash; + std::string file; + td::uint64 query_id = 0; + int cnt = 0; + td::optional provider_address; + td::optional rate; + td::optional max_span; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--query-id") { + ++i; + TRY_RESULT_PREFIX_ASSIGN(query_id, td::to_integer_safe(tokens[i]), "Invalid query id: "); + continue; + } + if (tokens[i] == "--provider") { + ++i; + provider_address = tokens[i]; + continue; + } + if (tokens[i] == "--rate") { + ++i; + rate = tokens[i]; + continue; + } + if (tokens[i] == "--max-span") { + ++i; + TRY_RESULT_PREFIX_ASSIGN(max_span, td::to_integer_safe(tokens[i]), "Invalid max span: "); + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (cnt == 0) { + TRY_RESULT_ASSIGN(hash, parse_torrent(tokens[i])); + } else if (cnt == 1) { + file = tokens[i]; + } + ++cnt; + } + if (cnt != 2) { + return td::Status::Error("Unexpected EOLN"); + } + return execute_new_contract_message(hash, std::move(file), query_id, std::move(provider_address), std::move(rate), + max_span); + } else if (tokens[0] == "import-pk") { + if (tokens.size() != 2) { + return td::Status::Error("Expected filename"); + } + return execute_import_pk(tokens[1]); + } else if (tokens[0] == "get-provider-params") { + bool json = false; + std::string address; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() 
<< "Unknown flag " << tokens[i]); + } + if (!address.empty()) { + return td::Status::Error("Unexpected token"); + } + address = tokens[i]; + } + return execute_get_provider_params(address, json); + } else if (tokens[0] == "deploy-provider") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_deploy_provider(); + } else if (tokens[0] == "init-provider") { + if (tokens.size() != 2) { + return td::Status::Error("Expected address"); + } + return execute_init_provider(tokens[1]); + } else if (tokens[0] == "remove-storage-provider") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_remove_storage_provider(); + } else if (tokens[0] == "set-provider-params") { + if (tokens.size() == 1) { + return td::Status::Error("No parameters specified"); + } + if (tokens.size() % 2 == 0) { + return td::Status::Error("Unexpected number of tokens"); + } + OptionalProviderParams new_params; + for (size_t i = 1; i < tokens.size(); i += 2) { + if (tokens[i] == "--accept") { + if (tokens[i + 1] == "0") { + new_params.accept_new_contracts = false; + } else if (tokens[i + 1] == "1") { + new_params.accept_new_contracts = true; + } else { + return td::Status::Error("Invalid value for --accept"); + } + continue; + } + if (tokens[i] == "--rate") { + new_params.rate_per_mb_day = tokens[i + 1]; + continue; + } + if (tokens[i] == "--max-span") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-span: "); + new_params.max_span = x; + continue; + } + if (tokens[i] == "--min-file-size") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --min-file-size: "); + new_params.minimal_file_size = x; + continue; + } + if (tokens[i] == "--max-file-size") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-file-size: "); + new_params.maximal_file_size = x; + continue; + } + return td::Status::Error(PSTRING() << 
"Unexpected token " << tokens[i]); + } + return execute_set_provider_params(std::move(new_params)); + } else if (tokens[0] == "get-provider-info") { + bool with_balances = false; + bool with_contracts = false; + bool json = false; + for (size_t i = 1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--balances") { + with_balances = true; + continue; + } + if (tokens[i] == "--contracts") { + with_contracts = true; + continue; + } + if (tokens[i] == "--json") { + json = true; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + } + return execute_get_provider_info(with_balances, with_contracts, json); + } else if (tokens[0] == "set-provider-config") { + if (tokens.size() == 1) { + return td::Status::Error("No parameters specified"); + } + if (tokens.size() % 2 == 0) { + return td::Status::Error("Unexpected number of tokens"); + } + OptionalProviderConfig new_config; + for (size_t i = 1; i < tokens.size(); i += 2) { + if (tokens[i] == "--max-contracts") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-contracts: "); + new_config.max_contracts = x; + continue; + } + if (tokens[i] == "--max-total-size") { + TRY_RESULT_PREFIX(x, td::to_integer_safe(tokens[i + 1]), "Invalid value for --max-total-size: "); + new_config.max_total_size = x; + continue; + } + return td::Status::Error(PSTRING() << "Unexpected token " << tokens[i]); + } + return execute_set_provider_config(std::move(new_config)); + } else if (tokens[0] == "withdraw") { + if (tokens.size() != 2) { + return td::Status::Error("Expected contract address"); + } + return execute_withdraw(tokens[1]); + } else if (tokens[0] == "withdraw-all") { + if (tokens.size() != 1) { + return td::Status::Error("Unexpected tokens"); + } + return execute_withdraw_all(); + } else if (tokens[0] == "send-coins") { + std::string address; + std::string amount; + int cnt = 0; + std::string message; + for (size_t i = 
1; i < tokens.size(); ++i) { + if (!tokens[i].empty() && tokens[i][0] == '-') { + if (tokens[i] == "--message") { + ++i; + if (i == tokens.size()) { + return td::Status::Error("Expected message"); + } + message = tokens[i]; + continue; + } + return td::Status::Error(PSTRING() << "Unknown flag " << tokens[i]); + } + if (cnt == 0) { + address = tokens[i]; + } else if (cnt == 1) { + amount = tokens[i]; + } else { + return td::Status::Error("Expected address and amount"); + } + ++cnt; + } + if (cnt != 2) { + return td::Status::Error("Expected address and amount"); + } + return execute_send_coins(address, amount, message); + } else if (tokens[0] == "close-contract") { + if (tokens.size() != 2) { + return td::Status::Error("Expected address"); + } + return execute_close_contract(tokens[1]); + } else { + return td::Status::Error(PSTRING() << "Error: unknown command " << tokens[0]); + } + } + + td::Status execute_help() { + td::TerminalIO::out() << "help\tPrint this help\n"; + td::TerminalIO::out() << "create [-d description] [--json] \tCreate bag of files from \n"; + td::TerminalIO::out() << "\t-d - Description will be stored in torrent info.\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() + << "add-by-hash [-d root_dir] [--paused] [--json] [--partial file1 file2 ...]\tAdd bag " + "with given BagID (in hex)\n"; + td::TerminalIO::out() << "\t-d\tTarget directory, default is an internal directory of storage-daemon\n"; + td::TerminalIO::out() << "\t--paused\tDon't start download immediately\n"; + td::TerminalIO::out() + << "\t--partial\tEverything after this flag is a list of filenames. 
Only these files will be downloaded.\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "add-by-meta [-d root_dir] [--paused] [--json] [--partial file1 file2 ...]\tLoad " + "meta from file and add bag\n"; + td::TerminalIO::out() << "\tFlags are the same as in add-by-hash\n"; + td::TerminalIO::out() << "list [--hashes] [--json]\tPrint list of bags\n"; + td::TerminalIO::out() << "\t--hashes\tPrint full BagID\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "get [--json]\tPrint information about \n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "\tHere and below bags are identified by BagID (in hex) or index (see bag list)\n"; + td::TerminalIO::out() << "get-meta \tSave bag meta of to \n"; + td::TerminalIO::out() << "get-peers [--json]\tPrint a list of peers\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "download-pause \tPause download of \n"; + td::TerminalIO::out() << "download-resume \tResume download of \n"; + td::TerminalIO::out() << "priority-all

\tSet priority of all files in to

\n"; + td::TerminalIO::out() << "\tPriority is in [0..255], 0 - don't download\n"; + td::TerminalIO::out() << "priority-idx

\tSet priority of file # in to

\n"; + td::TerminalIO::out() << "\tPriority is in [0..255], 0 - don't download\n"; + td::TerminalIO::out() << "priority-name

\tSet priority of file in to

\n"; + td::TerminalIO::out() << "\tPriority is in [0..255], 0 - don't download\n"; + td::TerminalIO::out() << "remove [--remove-files]\tRemove \n"; + td::TerminalIO::out() << "\t--remove-files - also remove all files\n"; + td::TerminalIO::out() << "load-from [--meta meta] [--files path]\tProvide meta and data for an existing " + "incomplete bag.\n"; + td::TerminalIO::out() << "\t--meta meta\ttorrent info and header will be inited (if not ready) from meta file\n"; + td::TerminalIO::out() << "\t--files path\tdata for files will be taken from here\n"; + td::TerminalIO::out() << "new-contract-message [--query-id id] --provider \tCreate " + "\"new contract message\" for storage provider. Saves message body to .\n"; + td::TerminalIO::out() << "\t\tAddress of storage provider account to take parameters from.\n"; + td::TerminalIO::out() << "new-contract-message [--query-id id] --rate --max-span " + "\tSame thing, but parameters are not fetched automatically.\n"; + td::TerminalIO::out() << "exit\tExit\n"; + td::TerminalIO::out() << "quit\tExit\n"; + td::TerminalIO::out() << "setverbosity \tSet vetbosity to in [0..10]\n"; + td::TerminalIO::out() << "\nStorage provider control:\n"; + td::TerminalIO::out() << "import-pk \tImport private key from \n"; + td::TerminalIO::out() << "deploy-provider\tInit storage provider by deploying a new provider smart contract\n"; + td::TerminalIO::out() + << "init-provider \tInit storage provider using the existing provider smart contract\n"; + td::TerminalIO::out() << "remove-storage-provider\tRemove storage provider\n"; + td::TerminalIO::out() + << "\tSmart contracts in blockchain and bags will remain intact, but they will not be managed anymore\n"; + td::TerminalIO::out() << "get-provider-params [address] [--json]\tPrint parameters of the smart contract\n"; + td::TerminalIO::out() + << "\taddress\tAddress of a smart contract. 
Default is the provider managed by this daemon.\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() << "set-provider-params [--accept x] [--rate x] [--max-span x] [--min-file-size x] " + "[--max-file-size x]\tSet parameters of the smart contract\n"; + td::TerminalIO::out() << "\t--accept\tAccept new contracts: 0 (no) or 1 (yes)\n"; + td::TerminalIO::out() << "\t--rate\tPrice of storage, nanoTON per MB*day\n"; + td::TerminalIO::out() << "\t--max-span\n"; + td::TerminalIO::out() << "\t--min-file-size\tMinimal total size of a bag of files (bytes)\n"; + td::TerminalIO::out() << "\t--max-file-size\tMaximal total size of a bag of files (bytes)\n"; + td::TerminalIO::out() + << "get-provider-info [--balances] [--contracts] [--json]\tPrint information about storage provider\n"; + td::TerminalIO::out() << "\t--contracts\tPrint list of storage contracts\n"; + td::TerminalIO::out() << "\t--balances\tPrint balances of the main contract and storage contracts\n"; + td::TerminalIO::out() << "\t--json\tOutput in json\n"; + td::TerminalIO::out() + << "set-provider-config [--max-contracts x] [--max-total-size x]\tSet configuration parameters\n"; + td::TerminalIO::out() << "\t--max-contracts\tMaximal number of storage contracts\n"; + td::TerminalIO::out() << "\t--max-total-size\tMaximal total size storage contracts (in bytes)\n"; + td::TerminalIO::out() << "withdraw

\tSend bounty from storage contract
to the main contract\n"; + td::TerminalIO::out() << "withdraw-all\tSend bounty from all storage contracts (where at least 1 TON is available) " + "to the main contract\n"; + td::TerminalIO::out() + << "send-coins
[--message msg]\tSend nanoTON to
from the main contract\n"; + td::TerminalIO::out() + << "close-contract
\tClose storage contract
and delete bag (if possible)\n"; + command_finished(td::Status::OK()); + return td::Status::OK(); + } + + td::Status execute_set_verbosity(int level) { + auto query = create_tl_object(level); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Success\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_create(std::string path, std::string description, bool json) { + TRY_RESULT_PREFIX_ASSIGN(path, td::realpath(path), "Invalid path: "); + auto query = create_tl_object(path, description); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::TerminalIO::out() << "Bag created\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_add_by_hash(td::Bits256 hash, std::string root_dir, bool paused, + td::optional> partial, bool json) { + if (!root_dir.empty()) { + TRY_STATUS_PREFIX(td::mkpath(root_dir), "Failed to create directory: "); + TRY_STATUS_PREFIX(td::mkdir(root_dir), "Failed to create directory: "); + TRY_RESULT_PREFIX_ASSIGN(root_dir, td::realpath(root_dir), "Invalid path: "); + } + std::vector> priorities; + if (partial) { + priorities.push_back(create_tl_object(0)); + for (std::string& f : partial.value()) { + priorities.push_back(create_tl_object(std::move(f), 1)); + } + } + auto query = + create_tl_object(hash, std::move(root_dir), !paused, std::move(priorities)); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::TerminalIO::out() << "Bag added\n"; + 
td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_add_by_meta(std::string meta_file, std::string root_dir, bool paused, + td::optional> partial, bool json) { + TRY_RESULT_PREFIX(meta, td::read_file(meta_file), "Failed to read meta: "); + if (!root_dir.empty()) { + TRY_STATUS_PREFIX(td::mkpath(root_dir), "Failed to create directory: "); + TRY_STATUS_PREFIX(td::mkdir(root_dir), "Failed to create directory: "); + TRY_RESULT_PREFIX_ASSIGN(root_dir, td::realpath(root_dir), "Invalid path: "); + } + std::vector> priorities; + if (partial) { + priorities.push_back(create_tl_object(0)); + for (std::string& f : partial.value()) { + priorities.push_back(create_tl_object(std::move(f), 1)); + } + } + auto query = create_tl_object(std::move(meta), std::move(root_dir), !paused, + std::move(priorities)); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::TerminalIO::out() << "Bag added\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_list(bool with_hashes, bool json) { + auto query = create_tl_object(); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_list, R.move_as_ok(), with_hashes); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get(td::Bits256 hash, bool json) { + auto query = create_tl_object(hash); + 
send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::print_torrent_full, R.move_as_ok()); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get_meta(td::Bits256 hash, std::string meta_file) { + auto query = create_tl_object(hash); + send_query(std::move(query), + [SelfId = actor_id(this), meta_file](td::Result> R) { + if (R.is_error()) { + return; + } + auto data = std::move(R.ok_ref()->meta_); + auto S = td::write_file(meta_file, data); + if (S.is_error()) { + td::actor::send_closure( + SelfId, &StorageDaemonCli::command_finished, + S.move_as_error_prefix(PSTRING() << "Failed to write meta (" << data.size() << " B): ")); + return; + } + td::TerminalIO::out() << "Saved meta (" << data.size() << " B)\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get_peers(td::Bits256 hash, bool json) { + auto query = create_tl_object(hash); + send_query( + std::move(query), [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + auto obj = R.move_as_ok(); + td::TerminalIO::out() << "BagID " << hash.to_hex() << "\n"; + td::TerminalIO::out() << "Download speed: " << td::format::as_size((td::uint64)obj->download_speed_) + << "/s\n"; + td::TerminalIO::out() << "Upload speed: " << td::format::as_size((td::uint64)obj->upload_speed_) << "/s\n"; + td::TerminalIO::out() << "Peers: " << obj->peers_.size() << "\n"; + std::vector> table; + table.push_back({"ADNL id", "Address", "Download", "Upload", "Ready"}); + for (auto& peer : obj->peers_) { + std::vector row; + row.push_back(PSTRING() << peer->adnl_id_); + row.push_back(peer->ip_str_); + 
row.push_back(PSTRING() << td::format::as_size((td::uint64)peer->download_speed_) << "/s"); + row.push_back(PSTRING() << td::format::as_size((td::uint64)peer->upload_speed_) << "/s"); + if (obj->total_parts_ > 0) { + char buf[10]; + snprintf(buf, sizeof(buf), "%5.1f%%", (double)peer->ready_parts_ / (double)obj->total_parts_ * 100); + row.push_back(buf); + } else { + row.push_back("???"); + } + table.push_back(std::move(row)); + } + print_table(table); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_active_download(td::Bits256 hash, bool active) { + auto query = create_tl_object(hash, active); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Success\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_priority_all(td::Bits256 hash, td::uint8 priority) { + auto query = create_tl_object(hash, priority); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (R.ok()->get_id() == ton_api::storage_daemon_prioritySet::ID) { + td::TerminalIO::out() << "Priority was set\n"; + } else { + td::TerminalIO::out() << "Torrent header is not available, priority will be set later\n"; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_priority_idx(td::Bits256 hash, td::uint64 idx, td::uint8 priority) { + auto query = create_tl_object(hash, idx, priority); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (R.ok()->get_id() == ton_api::storage_daemon_prioritySet::ID) { + td::TerminalIO::out() << "Priority was set\n"; + } else { + td::TerminalIO::out() << 
"Torrent header is not available, priority will be set later\n"; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_priority_name(td::Bits256 hash, std::string name, td::uint8 priority) { + auto query = create_tl_object(hash, std::move(name), priority); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (R.ok()->get_id() == ton_api::storage_daemon_prioritySet::ID) { + td::TerminalIO::out() << "Priority was set\n"; + } else { + td::TerminalIO::out() << "Torrent header is not available, priority will be set later\n"; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_remove(td::Bits256 hash, bool remove_files) { + auto query = create_tl_object(hash, remove_files); + send_query(std::move(query), + [SelfId = actor_id(this), hash](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Success\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::delete_id, hash); + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_load_from(td::Bits256 hash, std::string meta, std::string path) { + if (meta.empty() && path.empty()) { + return td::Status::Error("Expected meta or files"); + } + td::BufferSlice meta_data; + if (!meta.empty()) { + TRY_RESULT_PREFIX_ASSIGN(meta_data, td::read_file(meta), "Failed to read meta: "); + } + if (!path.empty()) { + TRY_RESULT_PREFIX_ASSIGN(path, td::realpath(path), "Invalid path: "); + } + auto query = create_tl_object(hash, std::move(meta_data), std::move(path)); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + auto torrent = R.move_as_ok(); + td::TerminalIO::out() << "Loaded data for bag 
" << torrent->hash_.to_hex() << "\n"; + if (torrent->flags_ & 4) { // fatal error + td::TerminalIO::out() << "FATAL ERROR: " << torrent->fatal_error_ << "\n"; + } + if (torrent->flags_ & 1) { // info ready + td::TerminalIO::out() << "Total size: " << td::format::as_size(torrent->total_size_) << "\n"; + if (torrent->flags_ & 2) { // header ready + td::TerminalIO::out() << "Ready: " << td::format::as_size(torrent->downloaded_size_) << "/" + << td::format::as_size(torrent->included_size_) + << (torrent->completed_ ? " (completed)" : "") << "\n"; + } else { + td::TerminalIO::out() << "Torrent header is not ready\n"; + } + } else { + td::TerminalIO::out() << "Torrent info is not ready\n"; + } + }); + return td::Status::OK(); + } + + td::Status execute_new_contract_message(td::Bits256 hash, std::string file, td::uint64 query_id, + td::optional provider_address, td::optional rate, + td::optional max_span) { + tl_object_ptr params; + if (provider_address) { + if (rate || max_span) { + return td::Status::Error("Incompatible flags"); + } + params = create_tl_object(provider_address.unwrap()); + } else { + if (!rate || !max_span) { + return td::Status::Error("No flags are set"); + } + params = create_tl_object(rate.unwrap(), max_span.unwrap()); + } + auto query = create_tl_object(hash, query_id, std::move(params)); + send_query(std::move(query), [SelfId = actor_id(this), + file](td::Result> R) { + if (R.is_error()) { + return; + } + auto obj = R.move_as_ok(); + auto S = td::write_file(file, obj->body_); + if (S.is_error()) { + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, + S.move_as_error_prefix(PSTRING() << "Failed to write to file: ")); + return; + } + td::TerminalIO::out() << "Saved message body to file\n"; + td::TerminalIO::out() << "Rate (nanoTON per mb*day): " << obj->rate_ << "\n"; + td::TerminalIO::out() << "Max span: " << obj->max_span_ << "\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + 
return td::Status::OK(); + } + + td::Status execute_import_pk(std::string file) { + TRY_RESULT(data, td::read_file_secure(file)); + TRY_RESULT(pk, ton::PrivateKey::import(data.as_slice())); + auto query = create_tl_object(pk.tl()); + send_query( + std::move(query), [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Imported private key. Public key hash: " << R.ok()->key_hash_.to_hex() << "\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_deploy_provider() { + auto query = create_tl_object(); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + auto obj = R.move_as_ok(); + block::StdAddress std_address; + CHECK(std_address.parse_addr(obj->address_)); + std_address.bounceable = false; + td::TerminalIO::out() << "Address: " << obj->address_ << "\n"; + td::TerminalIO::out() << "Non-bounceable address: " << std_address.rserialize() << "\n"; + td::TerminalIO::out() + << "Send a non-bounceable message with 1 TON to this address to initialize smart contract.\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_init_provider(std::string address) { + auto query = create_tl_object(std::move(address)); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Address of the storage provider was set\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_remove_storage_provider() { + auto query = create_tl_object(); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Storage provider removed\n"; + 
td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_get_provider_params(std::string address, bool json) { + auto query = create_tl_object(address); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + auto params = R.move_as_ok(); + td::TerminalIO::out() << "Storage provider parameters:\n"; + td::TerminalIO::out() << "Accept new contracts: " << params->accept_new_contracts_ << "\n"; + td::TerminalIO::out() << "Rate (nanoTON per day*MB): " << params->rate_per_mb_day_ << "\n"; + td::TerminalIO::out() << "Max span: " << (td::uint32)params->max_span_ << "\n"; + td::TerminalIO::out() << "Min file size: " << (td::uint64)params->minimal_file_size_ << "\n"; + td::TerminalIO::out() << "Max file size: " << (td::uint64)params->maximal_file_size_ << "\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_provider_params(OptionalProviderParams new_params) { + auto query_get = create_tl_object(); + send_query(std::move(query_get), [SelfId = actor_id(this), new_params = std::move(new_params)]( + td::Result> R) mutable { + if (R.is_error()) { + return; + } + td::actor::send_closure(SelfId, &StorageDaemonCli::execute_set_provider_params_cont, R.move_as_ok(), + std::move(new_params)); + }); + return td::Status::OK(); + } + + void execute_set_provider_params_cont(tl_object_ptr params, + OptionalProviderParams new_params) { + if (new_params.accept_new_contracts) { + params->accept_new_contracts_ = new_params.accept_new_contracts.unwrap(); + } + if (new_params.rate_per_mb_day) { + params->rate_per_mb_day_ = new_params.rate_per_mb_day.unwrap(); + } + if (new_params.max_span) { + params->max_span_ = new_params.max_span.unwrap(); + } + if (new_params.minimal_file_size) { + 
params->minimal_file_size_ = new_params.minimal_file_size.unwrap(); + } + if (new_params.maximal_file_size) { + params->maximal_file_size_ = new_params.maximal_file_size.unwrap(); + } + td::TerminalIO::out() << "Sending external message to update provider parameters...\n"; + auto query_set = create_tl_object(std::move(params)); + send_query(std::move(query_set), + [SelfId = actor_id(this)](td::Result> R) mutable { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Storage provider parameters were updated\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + } + + td::Status execute_get_provider_info(bool with_balances, bool with_contracts, bool json) { + auto query = create_tl_object(with_balances, with_contracts); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + if (json) { + print_json(R.ok()); + return; + } + auto info = R.move_as_ok(); + td::TerminalIO::out() << "Storage provider " << info->address_ << "\n"; + td::TerminalIO::out() << "Storage contracts: " << (td::uint32)info->contracts_count_ << " / " + << (td::uint32)info->config_->max_contracts_ << "\n"; + td::TerminalIO::out() << "Total size: " << size_to_str(info->contracts_total_size_) << " / " + << size_to_str(info->config_->max_total_size_) << "\n"; + if (with_balances) { + td::TerminalIO::out() << "Main contract balance: " << coins_to_str(info->balance_) << " TON\n"; + } + if (with_contracts) { + td::TerminalIO::out() << "Storage contracts: " << info->contracts_.size() << "\n"; + std::vector> table; + table.push_back({"Address", "BagID", "Created at", "Size", "State"}); + if (with_balances) { + table.back().push_back("Client$"); + table.back().push_back("Contract$"); + } + for (const auto& c : info->contracts_) { + table.emplace_back(); + table.back().push_back(c->address_); + table.back().push_back(c->torrent_.to_hex()); + 
table.back().push_back(time_to_str(c->created_time_)); + table.back().push_back(size_to_str(c->file_size_)); + // enum State { st_downloading = 0, st_downloaded = 1, st_active = 2, st_closing = 3 }; + switch (c->state_) { + case 0: + table.back().push_back("Downloading (" + size_to_str(c->downloaded_size_) + ")"); + break; + case 1: + table.back().push_back("Downloaded"); + break; + case 2: + table.back().push_back("Active"); + break; + case 3: + table.back().push_back("Closing"); + break; + default: + table.back().push_back("???"); + } + if (with_balances) { + table.back().push_back(coins_to_str(c->client_balance_)); + table.back().push_back(coins_to_str(c->contract_balance_)); + } + } + print_table(table); + } + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_set_provider_config(OptionalProviderConfig new_config) { + auto query_get = create_tl_object(false, false); + send_query(std::move(query_get), [SelfId = actor_id(this), new_config = std::move(new_config)]( + td::Result> R) mutable { + if (R.is_error()) { + return; + } + auto info = R.move_as_ok(); + td::actor::send_closure(SelfId, &StorageDaemonCli::execute_set_provider_config_cont, std::move(info->config_), + std::move(new_config)); + }); + return td::Status::OK(); + } + + void execute_set_provider_config_cont(tl_object_ptr config, + OptionalProviderConfig new_config) { + if (new_config.max_contracts) { + config->max_contracts_ = new_config.max_contracts.unwrap(); + } + if (new_config.max_total_size) { + config->max_total_size_ = new_config.max_total_size.unwrap(); + } + auto query_set = create_tl_object(std::move(config)); + send_query(std::move(query_set), + [SelfId = actor_id(this)](td::Result> R) mutable { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Storage provider config was updated\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + 
} + + td::Status execute_withdraw(std::string address) { + auto query = create_tl_object(std::move(address)); + td::TerminalIO::out() << "Sending external message...\n"; + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Bounty was withdrawn\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_withdraw_all() { + auto query = create_tl_object(true, true); + send_query(std::move(query), + [=, SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + auto info = R.move_as_ok(); + std::vector addresses; + for (auto& contract : info->contracts_) { + if (contract->state_ != 2) { + continue; + } + td::RefInt256 remaining = td::dec_string_to_int256(contract->contract_balance_) - + td::dec_string_to_int256(contract->client_balance_); + if (remaining < td::make_refint(1'000'000'000)) { + continue; + } + td::TerminalIO::out() << "Withdrawing from " << contract->address_ << " (" << coins_to_str(remaining) + << " TON)\n"; + addresses.push_back(contract->address_); + } + if (addresses.empty()) { + td::TerminalIO::out() << "Nothing to withdraw\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + } else { + td::actor::send_closure(SelfId, &StorageDaemonCli::execute_withdraw_all_cont, std::move(addresses)); + } + }); + return td::Status::OK(); + } + + void execute_withdraw_all_cont(std::vector addresses) { + td::MultiPromise mp; + auto ig = mp.init_guard(); + ig.add_promise([SelfId = actor_id(this), cnt = addresses.size()](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, R.move_as_error()); + return; + } + td::TerminalIO::out() << "Sent bounty from " << cnt << " contracts\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + 
for (auto& address : addresses) { + auto query = create_tl_object(std::move(address)); + send_query(std::move(query), + ig.get_promise().wrap([](tl_object_ptr) { return td::Unit(); })); + } + } + + td::Status execute_send_coins(std::string address, std::string amount, std::string message) { + auto query = + create_tl_object(std::move(address), std::move(amount), std::move(message)); + td::TerminalIO::out() << "Sending external messages...\n"; + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Internal message was sent\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + td::Status execute_close_contract(std::string address) { + auto query = create_tl_object(std::move(address)); + send_query(std::move(query), + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + return; + } + td::TerminalIO::out() << "Closing storage contract\n"; + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, td::Status::OK()); + }); + return td::Status::OK(); + } + + template + void send_query(tl_object_ptr query, td::Promise promise, bool process_error = true) { + td::actor::send_closure( + client_, &adnl::AdnlExtClient::send_query, "q", serialize_tl_object(query, true), td::Timestamp::in(1800.0), + [SelfId = actor_id(this), promise = std::move(promise), process_error](td::Result R) mutable { + td::Result result; + if (R.is_error()) { + if (R.error().message().empty() && R.error().code() == ErrorCode::cancelled) { + result = td::Status::Error("Query error: failed to connect"); + } else { + result = R.move_as_error_prefix("Query error: "); + } + } else { + td::BufferSlice data = R.move_as_ok(); + result = fetch_tl_object(data, true); + if (result.is_error()) { + auto R3 = fetch_tl_object(data, true); + if (R3.is_ok()) { + result = td::Status::Error("Query error: " + R3.ok()->message_); + } + 
} + } + if (result.is_ok()) { + promise.set_value(result.move_as_ok()); + } else { + promise.set_error(result.error().clone()); + if (process_error) { + td::actor::send_closure(SelfId, &StorageDaemonCli::command_finished, result.move_as_error()); + } + } + }); + } + + void command_finished(td::Status S) { + if (S.is_error()) { + td::TerminalIO::out() << S.message() << "\n"; + if (batch_mode_) { + std::exit(2); + } + } else if (batch_mode_) { + if (cur_command_ == commands_.size()) { + std::exit(0); + } else { + parse_line(td::BufferSlice(commands_[cur_command_++])); + } + } + } + + private: + td::IPAddress server_ip_; + PrivateKey client_private_key_; + PublicKey server_public_key_; + std::vector commands_; + bool batch_mode_ = false; + bool batch_started_ = false; + size_t cur_command_ = 0; + td::actor::ActorOwn client_; + td::actor::ActorOwn io_; + + std::map id_to_hash_; + std::map hash_to_id_; + td::uint32 cur_id_ = 0; + + void add_id(td::Bits256 hash) { + if (hash_to_id_.emplace(hash, cur_id_).second) { + id_to_hash_[cur_id_++] = hash; + } + } + + void delete_id(td::Bits256 hash) { + auto it = hash_to_id_.find(hash); + if (it != hash_to_id_.end()) { + id_to_hash_.erase(it->second); + hash_to_id_.erase(it); + } + } + + void update_ids(std::vector hashes) { + for (const td::Bits256& hash : hashes) { + add_id(hash); + } + std::sort(hashes.begin(), hashes.end()); + for (auto it = hash_to_id_.begin(); it != hash_to_id_.end();) { + if (std::binary_search(hashes.begin(), hashes.end(), it->first)) { + ++it; + } else { + id_to_hash_.erase(it->second); + it = hash_to_id_.erase(it); + } + } + } + + void print_torrent_full(tl_object_ptr ptr) { + auto& obj = *ptr; + add_id(obj.torrent_->hash_); + td::TerminalIO::out() << "BagID = " << obj.torrent_->hash_.to_hex() << "\n"; + td::TerminalIO::out() << "Index = " << hash_to_id_[obj.torrent_->hash_] << "\n"; + if (obj.torrent_->flags_ & 4) { // fatal error + td::TerminalIO::out() << "FATAL ERROR: " << obj.torrent_->fatal_error_ 
<< "\n"; + } + if (obj.torrent_->flags_ & 1) { // info ready + if (!obj.torrent_->description_.empty()) { + td::TerminalIO::out() << "-----------------------------------\n"; + td::TerminalIO::out() << obj.torrent_->description_ << "\n"; + td::TerminalIO::out() << "-----------------------------------\n"; + } + if (obj.torrent_->flags_ & 2) { // header ready + td::TerminalIO::out() << "Downloaded: " << td::format::as_size(obj.torrent_->downloaded_size_) << "/" + << td::format::as_size(obj.torrent_->included_size_) + << (obj.torrent_->completed_ + ? " (completed)" + : " (remaining " + + size_to_str(obj.torrent_->included_size_ - obj.torrent_->downloaded_size_) + + ")") + << "\n"; + td::TerminalIO::out() << "Dir name: " << obj.torrent_->dir_name_ << "\n"; + } + td::TerminalIO::out() << "Total size: " << td::format::as_size(obj.torrent_->total_size_) << "\n"; + } else { + td::TerminalIO::out() << "Torrent info is not available\n"; + } + if (obj.torrent_->completed_) { + } else if (obj.torrent_->active_download_) { + td::TerminalIO::out() << "Download speed: " << td::format::as_size((td::uint64)obj.torrent_->download_speed_) + << "/s\n"; + } else { + td::TerminalIO::out() << "Download paused\n"; + } + td::TerminalIO::out() << "Upload speed: " << td::format::as_size((td::uint64)obj.torrent_->upload_speed_) << "/s\n"; + td::TerminalIO::out() << "Root dir: " << obj.torrent_->root_dir_ << "\n"; + if (obj.torrent_->flags_ & 2) { // header ready + td::TerminalIO::out() << obj.files_.size() << " files:\n"; + td::TerminalIO::out() << "###### Prior Ready/Size Name\n"; + td::uint32 i = 0; + for (const auto& f : obj.files_) { + char str[64]; + char priority[4] = "---"; + if (f->priority_ > 0) { + CHECK(f->priority_ <= 255); + snprintf(priority, sizeof(priority), "%03d", f->priority_); + } + snprintf(str, sizeof(str), "%6u: (%s) %7s/%-7s %s ", i, priority, + f->priority_ == 0 ? 
"---" : size_to_str(f->downloaded_size_).c_str(), size_to_str(f->size_).c_str(), + (f->downloaded_size_ == f->size_ ? "+" : " ")); + td::TerminalIO::out() << str << f->name_ << "\n"; + ++i; + } + } else { + td::TerminalIO::out() << "Torrent header is not available\n"; + } + } + + void print_torrent_list(tl_object_ptr ptr, bool with_hashes) { + auto& obj = *ptr; + std::vector hashes; + for (const auto& torrent : obj.torrents_) { + hashes.push_back(torrent->hash_); + } + update_ids(std::move(hashes)); + std::sort(obj.torrents_.begin(), obj.torrents_.end(), + [&](const tl_object_ptr& a, + const tl_object_ptr& b) { + return hash_to_id_[a->hash_] < hash_to_id_[b->hash_]; + }); + td::TerminalIO::out() << obj.torrents_.size() << " bags\n"; + std::vector> table; + table.push_back({"#####", "BagID", "Description", "Downloaded", "Total", "Speed"}); + for (const auto& torrent : obj.torrents_) { + std::vector row; + row.push_back(std::to_string(hash_to_id_[torrent->hash_])); + std::string hash_str = torrent->hash_.to_hex(); + if (!with_hashes) { + hash_str = hash_str.substr(0, 8) + "..."; + } + row.push_back(hash_str); + std::string description = torrent->description_; + for (size_t i = 0; i < description.size(); ++i) { + if (!is_whitespace(description[i])) { + description.erase(description.begin(), description.begin() + i); + break; + } + } + for (size_t i = 0; i < description.size(); ++i) { + if (description[i] == '\n') { + description.resize(i); + break; + } + } + if (description.size() > 45) { + description.resize(42); + description += "..."; + } + row.push_back(description); + bool info_ready = torrent->flags_ & 1; + bool header_ready = torrent->flags_ & 2; + std::string downloaded_size = size_to_str(torrent->downloaded_size_); + std::string included_size = header_ready ? size_to_str(torrent->included_size_) : "???"; + std::string total_size = info_ready ? 
size_to_str(torrent->total_size_) : "???"; + std::string status; + if (torrent->flags_ & 4) { // fatal error + status = "FATAL ERROR: " + torrent->fatal_error_; + } else { + status = + torrent->completed_ + ? "COMPLETED" + : (torrent->active_download_ ? size_to_str((td::uint64)torrent->download_speed_) + "/s" : "Paused"); + } + row.push_back(downloaded_size.append("/").append(included_size)); + row.push_back(total_size); + row.push_back(status); + table.push_back(std::move(row)); + } + print_table(table, {2}); + } +}; + +int main(int argc, char* argv[]) { + SET_VERBOSITY_LEVEL(verbosity_INFO); + td::set_default_failure_signal_handler(); + td::IPAddress ip_addr; + PrivateKey client_private_key; + PublicKey server_public_key; + std::vector commands; + td::OptionParser p; + p.set_description("command-line interface for storage-daemon"); + p.add_option('h', "help", "prints_help", [&]() { + std::cout << (PSLICE() << p).c_str(); + std::exit(2); + }); + p.add_checked_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) { + auto verbosity = td::to_integer(arg); + SET_VERBOSITY_LEVEL(VERBOSITY_NAME(FATAL) + verbosity); + return (verbosity >= 0 && verbosity <= 20) ? 
td::Status::OK() : td::Status::Error("verbosity must be 0..20"); + }); + p.add_option('V', "version", "shows storage-daemon-cli build information", [&]() { + std::cout << "storage-daemon-cli build information: [ Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate() << "]\n"; + std::exit(0); + }); + p.add_checked_option('I', "ip", "set ip:port of storage-daemon", [&](td::Slice arg) { + TRY_STATUS(ip_addr.init_host_port(arg.str())); + return td::Status::OK(); + }); + p.add_option('c', "cmd", "execute command", [&](td::Slice arg) { commands.push_back(arg.str()); }); + p.add_checked_option('k', "key", "private key", [&](td::Slice arg) { + TRY_RESULT_PREFIX(data, td::read_file(arg.str()), "failed to read: "); + TRY_RESULT_ASSIGN(client_private_key, PrivateKey::import(data)); + return td::Status::OK(); + }); + p.add_checked_option('p', "pub", "server public key", [&](td::Slice arg) { + TRY_RESULT_PREFIX(data, td::read_file(arg.str()), "failed to read: "); + TRY_RESULT_ASSIGN(server_public_key, PublicKey::import(data)); + return td::Status::OK(); + }); + + auto S = p.run(argc, argv); + if (S.is_error()) { + std::cerr << S.move_as_error().message().str() << std::endl; + std::_Exit(2); + } + LOG_IF(FATAL, client_private_key.empty()) << "Client private key is not set"; + LOG_IF(FATAL, server_public_key.empty()) << "Server public key is not set"; + + td::actor::Scheduler scheduler({0}); + scheduler.run_in_context([&] { + td::actor::create_actor("console", ip_addr, client_private_key, server_public_key, + std::move(commands)) + .release(); + }); + scheduler.run(); + return 0; +} diff --git a/storage/storage-daemon/storage-daemon.cpp b/storage/storage-daemon/storage-daemon.cpp new file mode 100644 index 00000000..1cdbb514 --- /dev/null +++ b/storage/storage-daemon/storage-daemon.cpp @@ -0,0 +1,937 @@ +/* + This file is part of TON Blockchain Library. 
+ + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ +#include "td/utils/filesystem.h" +#include "td/actor/actor.h" +#include "td/actor/MultiPromise.h" +#include "td/utils/OptionParser.h" +#include "td/utils/port/path.h" +#include "td/utils/port/signals.h" +#include "td/utils/port/user.h" +#include "td/utils/port/IPAddress.h" +#include "td/utils/Random.h" +#include "td/utils/FileLog.h" +#include "checksum.h" +#include "git.h" +#include "auto/tl/ton_api_json.h" +#include "common/delay.h" + +#include "adnl/adnl.h" +#include "rldp2/rldp.h" +#include "dht/dht.h" +#include "overlay/overlays.h" + +#include "Torrent.h" +#include "TorrentCreator.h" +#include "StorageManager.h" +#include "StorageProvider.h" + +#if TD_DARWIN || TD_LINUX +#include +#endif +#include + +using namespace ton; + +td::BufferSlice create_query_error(td::CSlice message) { + return create_serialize_tl_object(message.str()); +} + +td::BufferSlice create_query_error(td::Status error) { + return create_query_error(error.message()); +} + +class StorageDaemon : public td::actor::Actor { + public: + StorageDaemon(td::IPAddress ip_addr, bool client_mode, std::string global_config, std::string db_root, + td::uint16 control_port, bool enable_storage_provider) + : ip_addr_(ip_addr) + , client_mode_(client_mode) + , global_config_(std::move(global_config)) + , db_root_(std::move(db_root)) + , 
control_port_(control_port) + , enable_storage_provider_(enable_storage_provider) { + } + + void start_up() override { + CHECK(db_root_ != ""); + td::mkdir(db_root_).ensure(); + keyring_ = keyring::Keyring::create(db_root_ + "/keyring"); + { + auto S = load_global_config(); + if (S.is_error()) { + LOG(FATAL) << "Failed to load global config: " << S; + } + } + { + auto S = load_daemon_config(); + if (S.is_error()) { + LOG(FATAL) << "Failed to load daemon config: " << S; + } + } + + init_adnl(); + + class Callback : public StorageManager::Callback { + public: + explicit Callback(td::actor::ActorId actor) : actor_(std::move(actor)) { + } + void on_ready() override { + td::actor::send_closure(actor_, &StorageDaemon::inited_storage_manager); + } + + private: + td::actor::ActorId actor_; + }; + manager_ = td::actor::create_actor("storage", local_id_, db_root_ + "/torrent", + td::make_unique(actor_id(this)), client_mode_, + adnl_.get(), rldp_.get(), overlays_.get()); + } + + td::Status load_global_config() { + TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: "); + TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: "); + ton_api::config_global conf; + TRY_STATUS_PREFIX(ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: "); + if (!conf.dht_) { + return td::Status::Error(ErrorCode::error, "does not contain [dht] section"); + } + TRY_RESULT_PREFIX(dht, dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: "); + dht_config_ = std::move(dht); + return td::Status::OK(); + } + + td::Status load_daemon_config() { + daemon_config_ = create_tl_object(); + auto r_conf_data = td::read_file(daemon_config_file()); + if (r_conf_data.is_ok()) { + auto conf_data = r_conf_data.move_as_ok(); + TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: "); + TRY_STATUS_PREFIX(ton_api::from_json(*daemon_config_, conf_json.get_object()), "json does 
not fit TL scheme: "); + return td::Status::OK(); + } + std::string keys_dir = db_root_ + "/cli-keys/"; + LOG(INFO) << "First launch, storing keys for storage-daemon-cli to " << keys_dir; + td::mkdir(keys_dir).ensure(); + auto generate_public_key = [&]() -> PublicKey { + auto pk = PrivateKey{privkeys::Ed25519::random()}; + auto pub = pk.compute_public_key(); + td::actor::send_closure(keyring_, &keyring::Keyring::add_key, std::move(pk), false, [](td::Unit) {}); + return pub; + }; + { + // Server key + daemon_config_->server_key_ = generate_public_key().tl(); + TRY_STATUS(td::write_file(keys_dir + "server.pub", serialize_tl_object(daemon_config_->server_key_, true))); + } + { + // Client key + auto pk = PrivateKey{privkeys::Ed25519::random()}; + daemon_config_->cli_key_hash_ = pk.compute_short_id().bits256_value(); + TRY_STATUS(td::write_file(keys_dir + "client", serialize_tl_object(pk.tl(), true))); + } + daemon_config_->adnl_id_ = generate_public_key().tl(); + daemon_config_->dht_id_ = generate_public_key().tl(); + return save_daemon_config(); + } + + td::Status save_daemon_config() { + auto s = td::json_encode(td::ToJson(*daemon_config_), true); + TRY_STATUS_PREFIX(td::write_file(daemon_config_file(), s), "Failed to write daemon config: "); + return td::Status::OK(); + } + + void init_adnl() { + CHECK(ip_addr_.is_valid()); + + adnl_network_manager_ = adnl::AdnlNetworkManager::create(static_cast(ip_addr_.get_port())); + adnl_ = adnl::Adnl::create(db_root_, keyring_.get()); + td::actor::send_closure(adnl_, &adnl::Adnl::register_network_manager, adnl_network_manager_.get()); + adnl::AdnlCategoryMask cat_mask; + cat_mask[0] = true; + td::actor::send_closure(adnl_network_manager_, &adnl::AdnlNetworkManager::add_self_addr, ip_addr_, + std::move(cat_mask), 0); + + adnl::AdnlAddressList addr_list; + if (!client_mode_) { + addr_list.add_udp_address(ip_addr_).ensure(); + } + addr_list.set_version(static_cast(td::Clocks::system())); + 
addr_list.set_reinit_date(adnl::Adnl::adnl_start_time()); + + adnl::AdnlNodeIdFull local_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->adnl_id_).move_as_ok(); + local_id_ = local_id_full.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_id, local_id_full, addr_list, static_cast(0)); + adnl::AdnlNodeIdFull dht_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->dht_id_).move_as_ok(); + dht_id_ = dht_id_full.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_id, dht_id_full, addr_list, static_cast(0)); + + if (client_mode_) { + auto D = dht::Dht::create_client(dht_id_, db_root_, dht_config_, keyring_.get(), adnl_.get()); + D.ensure(); + dht_ = D.move_as_ok(); + } else { + auto D = dht::Dht::create(dht_id_, db_root_, dht_config_, keyring_.get(), adnl_.get()); + D.ensure(); + dht_ = D.move_as_ok(); + } + td::actor::send_closure(adnl_, &adnl::Adnl::register_dht_node, dht_.get()); + + rldp_ = ton_rldp::Rldp::create(adnl_.get()); + td::actor::send_closure(rldp_, &ton_rldp::Rldp::add_id, local_id_); + overlays_ = overlay::Overlays::create(db_root_, keyring_.get(), adnl_.get(), dht_.get()); + } + + void inited_storage_manager() { + if (enable_storage_provider_) { + if (!daemon_config_->provider_address_.empty()) { + auto provider_account = ContractAddress::parse(daemon_config_->provider_address_).move_as_ok(); + init_tonlib_client(); + provider_ = td::actor::create_actor("provider", provider_account, db_root_ + "/provider", + tonlib_client_.get(), manager_.get(), keyring_.get()); + } else { + LOG(WARNING) << "Storage provider account is not set, it can be set in storage-daemon-cli"; + } + } + init_control_interface(); + } + + void init_control_interface() { + if (control_port_ == 0) { + return; + } + + auto adnl_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->server_key_).move_as_ok(); + auto adnl_id = adnl_id_full.compute_short_id(); + td::actor::send_closure(adnl_, &adnl::Adnl::add_id, adnl_id_full, 
adnl::AdnlAddressList(), + static_cast(255)); + + class Callback : public adnl::Adnl::Callback { + public: + explicit Callback(td::actor::ActorId id) : self_id_(id) { + } + void receive_message(adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data) override { + } + void receive_query(adnl::AdnlNodeIdShort src, adnl::AdnlNodeIdShort dst, td::BufferSlice data, + td::Promise promise) override { + td::actor::send_closure(self_id_, &StorageDaemon::process_control_query, src, std::move(data), + std::move(promise)); + } + + private: + td::actor::ActorId self_id_; + }; + td::actor::send_closure(adnl_, &adnl::Adnl::subscribe, adnl_id, "", std::make_unique(actor_id(this))); + td::actor::send_closure(adnl_, &adnl::Adnl::create_ext_server, std::vector{adnl_id}, + std::vector{control_port_}, + [SelfId = actor_id(this)](td::Result> R) { + if (R.is_error()) { + LOG(ERROR) << "Failed to init control interface: " << R.move_as_error(); + return; + } + td::actor::send_closure(SelfId, &StorageDaemon::created_ext_server, R.move_as_ok()); + }); + } + + void created_ext_server(td::actor::ActorOwn ext_server) { + ext_server_ = std::move(ext_server); + LOG(INFO) << "Started control interface on port " << control_port_; + } + + void process_control_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise promise) { + promise = [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_value(create_query_error(R.move_as_error())); + } else { + promise.set_value(R.move_as_ok()); + } + }; + if (src.bits256_value() != daemon_config_->cli_key_hash_) { + promise.set_error(td::Status::Error("Not authorized")); + return; + } + auto F = fetch_tl_object(data, true); + if (F.is_error()) { + promise.set_error(F.move_as_error_prefix("failed to parse control query: ")); + return; + } + auto f = F.move_as_ok(); + LOG(DEBUG) << "Running control query " << f->get_id(); + ton_api::downcast_call(*f, [&](auto &obj) { run_control_query(obj, 
std::move(promise)); }); + } + + void run_control_query(ton_api::storage_daemon_setVerbosity &query, td::Promise promise) { + if (query.verbosity_ < 0 || query.verbosity_ > 10) { + promise.set_value(create_query_error("verbosity should be in range [0..10]")); + return; + } + SET_VERBOSITY_LEVEL(VERBOSITY_NAME(FATAL) + query.verbosity_); + promise.set_result(create_serialize_tl_object()); + } + + void run_control_query(ton_api::storage_daemon_createTorrent &query, td::Promise promise) { + // Run in a separate thread + delay_action( + [promise = std::move(promise), manager = manager_.get(), query = std::move(query)]() mutable { + Torrent::Creator::Options options; + options.piece_size = 128 * 1024; + options.description = std::move(query.description_); + TRY_RESULT_PROMISE(promise, torrent, Torrent::Creator::create_from_path(std::move(options), query.path_)); + td::Bits256 hash = torrent.get_hash(); + td::actor::send_closure(manager, &StorageManager::add_torrent, std::move(torrent), false, + [manager, hash, promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + get_torrent_info_full_serialized(manager, hash, std::move(promise)); + } + }); + }, + td::Timestamp::now()); + } + + void run_control_query(ton_api::storage_daemon_addByHash &query, td::Promise promise) { + td::Bits256 hash = query.hash_; + bool start_download_now = query.start_download_ && query.priorities_.empty(); + td::actor::send_closure( + manager_, &StorageManager::add_torrent_by_hash, hash, std::move(query.root_dir_), start_download_now, + query_add_torrent_cont(hash, query.start_download_, std::move(query.priorities_), std::move(promise))); + } + + void run_control_query(ton_api::storage_daemon_addByMeta &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, meta, TorrentMeta::deserialize(query.meta_)); + td::Bits256 hash(meta.info.get_hash()); + bool start_download_now = query.start_download_ && query.priorities_.empty(); 
+ td::actor::send_closure( + manager_, &StorageManager::add_torrent_by_meta, std::move(meta), std::move(query.root_dir_), start_download_now, + query_add_torrent_cont(hash, query.start_download_, std::move(query.priorities_), std::move(promise))); + } + + td::Promise query_add_torrent_cont(td::Bits256 hash, bool start_download, + std::vector> priorities, + td::Promise promise) { + return [manager = manager_.get(), hash, start_download = start_download, priorities = std::move(priorities), + promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + if (!priorities.empty()) { + for (auto &p : priorities) { + ton_api::downcast_call( + *p, td::overloaded( + [&](ton_api::storage_priorityAction_all &obj) { + td::actor::send_closure(manager, &StorageManager::set_all_files_priority, hash, + (td::uint8)obj.priority_, [](td::Result) {}); + }, + [&](ton_api::storage_priorityAction_idx &obj) { + td::actor::send_closure(manager, &StorageManager::set_file_priority_by_idx, hash, obj.idx_, + (td::uint8)obj.priority_, [](td::Result) {}); + }, + [&](ton_api::storage_priorityAction_name &obj) { + td::actor::send_closure(manager, &StorageManager::set_file_priority_by_name, hash, + std::move(obj.name_), (td::uint8)obj.priority_, + [](td::Result) {}); + })); + } + if (start_download) { + td::actor::send_closure(manager, &StorageManager::set_active_download, hash, true, + [](td::Result) {}); + } + } + get_torrent_info_full_serialized(manager, hash, std::move(promise)); + }; + } + + void run_control_query(ton_api::storage_daemon_setActiveDownload &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::set_active_download, query.hash_, query.active_, + promise.wrap([](td::Unit &&) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_getTorrents &query, td::Promise promise) { + td::actor::send_closure( + manager_, 
&StorageManager::get_all_torrents, + [manager = manager_.get(), promise = std::move(promise)](td::Result> R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + std::vector torrents = R.move_as_ok(); + auto result = std::make_shared>>(torrents.size()); + td::MultiPromise mp; + auto ig = mp.init_guard(); + for (size_t i = 0; i < torrents.size(); ++i) { + get_torrent_info_short(manager, torrents[i], + [i, result, promise = ig.get_promise()]( + td::Result> R) mutable { + if (R.is_ok()) { + result->at(i) = R.move_as_ok(); + } + promise.set_result(td::Unit()); + }); + } + ig.add_promise([promise = std::move(promise), result](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + return; + } + auto v = std::move(*result); + v.erase(std::remove(v.begin(), v.end(), nullptr), v.end()); + promise.set_result(create_serialize_tl_object(std::move(v))); + }); + }); + } + + void run_control_query(ton_api::storage_daemon_getTorrentFull &query, td::Promise promise) { + get_torrent_info_full_serialized(manager_.get(), query.hash_, std::move(promise)); + } + + void run_control_query(ton_api::storage_daemon_getTorrentMeta &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::with_torrent, query.hash_, + promise.wrap([](NodeActor::NodeState state) -> td::Result { + Torrent &torrent = state.torrent; + if (!torrent.inited_info()) { + return td::Status::Error("Torrent meta is not available"); + } + std::string meta_str = torrent.get_meta(Torrent::GetMetaOptions().with_proof_depth_limit(10)).serialize(); + return create_serialize_tl_object(td::BufferSlice(meta_str)); + })); + } + + void run_control_query(ton_api::storage_daemon_getTorrentPeers &query, td::Promise promise) { + td::actor::send_closure(manager_, &StorageManager::get_peers_info, query.hash_, + promise.wrap([](tl_object_ptr obj) -> td::BufferSlice { + return serialize_tl_object(obj, true); + })); + } + + void 
run_control_query(ton_api::storage_daemon_setFilePriorityAll &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe(query.priority_)); + td::actor::send_closure(manager_, &StorageManager::set_all_files_priority, query.hash_, priority, + promise.wrap([](bool done) -> td::Result { + if (done) { + return create_serialize_tl_object(); + } else { + return create_serialize_tl_object(); + } + })); + } + + void run_control_query(ton_api::storage_daemon_setFilePriorityByIdx &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe(query.priority_)); + td::actor::send_closure(manager_, &StorageManager::set_file_priority_by_idx, query.hash_, query.idx_, priority, + promise.wrap([](bool done) -> td::Result { + if (done) { + return create_serialize_tl_object(); + } else { + return create_serialize_tl_object(); + } + })); + } + + void run_control_query(ton_api::storage_daemon_setFilePriorityByName &query, td::Promise promise) { + TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe(query.priority_)); + td::actor::send_closure(manager_, &StorageManager::set_file_priority_by_name, query.hash_, std::move(query.name_), + priority, promise.wrap([](bool done) -> td::Result { + if (done) { + return create_serialize_tl_object(); + } else { + return create_serialize_tl_object(); + } + })); + } + + void run_control_query(ton_api::storage_daemon_removeTorrent &query, td::Promise promise) { + td::actor::send_closure( + manager_, &StorageManager::remove_torrent, query.hash_, query.remove_files_, + promise.wrap([](td::Unit &&) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_loadFrom &query, td::Promise promise) { + td::optional meta; + if (!query.meta_.empty()) { + TRY_RESULT_PROMISE_ASSIGN(promise, meta, TorrentMeta::deserialize(query.meta_)); + } + td::actor::send_closure( + manager_, &StorageManager::load_from, query.hash_, std::move(meta), std::move(query.path_), 
+ [manager = manager_.get(), hash = query.hash_, promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + get_torrent_info_short(manager, hash, promise.wrap([](tl_object_ptr obj) { + return serialize_tl_object(obj, true); + })); + } + }); + } + + void run_control_query(ton_api::storage_daemon_getNewContractMessage &query, td::Promise promise) { + td::Promise> P = + [promise = std::move(promise), hash = query.hash_, query_id = query.query_id_, + manager = manager_.get()](td::Result> R) mutable { + TRY_RESULT_PROMISE(promise, r, std::move(R)); + td::actor::send_closure( + manager, &StorageManager::with_torrent, hash, + promise.wrap([r = std::move(r), query_id](NodeActor::NodeState state) -> td::Result { + Torrent &torrent = state.torrent; + if (!torrent.is_completed()) { + return td::Status::Error("Torrent is not complete"); + } + TRY_RESULT(microchunk_tree, MicrochunkTree::Builder::build_for_torrent(torrent, 1LL << 60)); + td::Ref msg = create_new_contract_message_body( + torrent.get_info().as_cell(), microchunk_tree.get_root_hash(), query_id, r.first, r.second); + return create_serialize_tl_object( + vm::std_boc_serialize(msg).move_as_ok(), r.first->to_dec_string(), r.second); + })); + }; + + ton_api::downcast_call(*query.params_, + td::overloaded( + [&](ton_api::storage_daemon_newContractParams &obj) { + td::RefInt256 rate = td::string_to_int256(obj.rate_); + if (rate.is_null() || rate->sgn() < 0) { + P.set_error(td::Status::Error("Invalid rate")); + return; + } + P.set_result(std::make_pair(std::move(rate), (td::uint32)obj.max_span_)); + }, + [&](ton_api::storage_daemon_newContractParamsAuto &obj) { + TRY_RESULT_PROMISE(P, address, ContractAddress::parse(obj.provider_address_)); + init_tonlib_client(); + StorageProvider::get_provider_params( + tonlib_client_.get(), address, P.wrap([](ProviderParams params) { + return std::make_pair(std::move(params.rate_per_mb_day), params.max_span); + })); + 
})); + } + + void run_control_query(ton_api::storage_daemon_importPrivateKey &query, td::Promise promise) { + auto pk = ton::PrivateKey{query.key_}; + td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false, + promise.wrap([hash = pk.compute_short_id()](td::Unit) mutable { + return create_serialize_tl_object(hash.bits256_value()); + })); + } + + void run_control_query(ton_api::storage_daemon_deployProvider &query, td::Promise promise) { + if (!enable_storage_provider_) { + promise.set_error( + td::Status::Error("Storage provider is not enabled, run daemon with --storage-provider to enable it")); + return; + } + if (!provider_.empty() || deploying_provider_) { + promise.set_error(td::Status::Error("Storage provider already exists")); + return; + } + TRY_RESULT_PROMISE_ASSIGN(promise, deploying_provider_, generate_fabric_contract(keyring_.get())); + promise.set_result(create_serialize_tl_object( + deploying_provider_.value().address.to_string())); + do_deploy_provider(); + } + + void run_control_query(ton_api::storage_daemon_initProvider &query, td::Promise promise) { + if (!enable_storage_provider_) { + promise.set_error( + td::Status::Error("Storage provider is not enabled, run daemon with --storage-provider to enable it")); + return; + } + if (!provider_.empty() || deploying_provider_) { + promise.set_error(td::Status::Error("Storage provider already exists")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.account_address_), "Invalid address: "); + do_init_provider( + address, promise.wrap([](td::Unit) { return create_serialize_tl_object(); })); + } + + void do_init_provider(ContractAddress address, td::Promise promise, bool deploying = false) { + if (deploying && (!deploying_provider_ || deploying_provider_.value().address != address)) { + promise.set_error(td::Status::Error("Deploying was cancelled")); + return; + } + daemon_config_->provider_address_ = address.to_string(); + 
TRY_STATUS_PROMISE(promise, save_daemon_config()); + init_tonlib_client(); + provider_ = td::actor::create_actor("provider", address, db_root_ + "/provider", + tonlib_client_.get(), manager_.get(), keyring_.get()); + deploying_provider_ = {}; + promise.set_result(td::Unit()); + } + + void do_deploy_provider() { + if (!deploying_provider_) { + return; + } + init_tonlib_client(); + check_contract_exists( + deploying_provider_.value().address, tonlib_client_.get(), + [SelfId = actor_id(this), client = tonlib_client_.get(), + init = deploying_provider_.value()](td::Result R) mutable { + if (R.is_error()) { + LOG(INFO) << "Deploying storage contract: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &StorageDaemon::do_deploy_provider); }, + td::Timestamp::in(5.0)); + return; + } + if (R.ok()) { + LOG(INFO) << "Deploying storage contract: DONE"; + td::actor::send_closure( + SelfId, &StorageDaemon::do_init_provider, init.address, [](td::Result) {}, true); + return; + } + ContractAddress address = init.address; + td::BufferSlice state_init_boc = vm::std_boc_serialize(init.state_init).move_as_ok(); + td::BufferSlice body_boc = vm::std_boc_serialize(init.msg_body).move_as_ok(); + auto query = create_tl_object( + create_tl_object(address.to_string()), state_init_boc.as_slice().str(), + body_boc.as_slice().str()); + td::actor::send_closure( + client, &tonlib::TonlibClientWrapper::send_request, + std::move(query), [=](td::Result> R) { + if (R.is_error()) { + LOG(INFO) << "Deploying storage contract: " << R.move_as_error(); + } + delay_action([=]() { td::actor::send_closure(SelfId, &StorageDaemon::do_deploy_provider); }, + td::Timestamp::in(5.0)); + }); + }); + } + + void run_control_query(ton_api::storage_daemon_removeStorageProvider &query, td::Promise promise) { + if (!enable_storage_provider_) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + if (provider_.empty() && !deploying_provider_) { + 
promise.set_error(td::Status::Error("No storage provider")); + return; + } + daemon_config_->provider_address_ = ""; + TRY_STATUS_PROMISE(promise, save_daemon_config()); + deploying_provider_ = {}; + provider_ = {}; + auto S = td::rmrf(db_root_ + "/provider"); + if (S.is_error()) { + LOG(ERROR) << "Failed to delete provider directory: " << S; + } + promise.set_result(create_serialize_tl_object()); + } + + void run_control_query(ton_api::storage_daemon_getProviderParams &query, td::Promise promise) { + if (!query.address_.empty()) { + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: "); + init_tonlib_client(); + StorageProvider::get_provider_params(tonlib_client_.get(), address, promise.wrap([](ProviderParams params) { + return serialize_tl_object(params.tl(), true); + })); + } + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + td::actor::send_closure(provider_, &StorageProvider::get_params, promise.wrap([](ProviderParams params) { + return serialize_tl_object(params.tl(), true); + })); + } + + void run_control_query(ton_api::storage_daemon_setProviderParams &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE(promise, params, ProviderParams::create(query.params_)); + td::actor::send_closure( + provider_, &StorageProvider::set_params, std::move(params), + promise.wrap([](td::Unit) mutable { return create_serialize_tl_object(); })); + } + + template + void run_control_query(T &query, td::Promise promise) { + promise.set_error(td::Status::Error("unknown query")); + } + + void run_control_query(ton_api::storage_daemon_getProviderInfo &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + td::actor::send_closure(provider_, &StorageProvider::get_provider_info, query.with_balances_, 
query.with_contracts_, + promise.wrap([](tl_object_ptr info) { + return serialize_tl_object(info, true); + })); + } + + void run_control_query(ton_api::storage_daemon_setProviderConfig &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + td::actor::send_closure( + provider_, &StorageProvider::set_provider_config, StorageProvider::Config(query.config_), + promise.wrap([](td::Unit) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_withdraw &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.contract_), "Invalid address: "); + td::actor::send_closure(provider_, &StorageProvider::withdraw, address, promise.wrap([](td::Unit) { + return create_serialize_tl_object(); + })); + } + + void run_control_query(ton_api::storage_daemon_sendCoins &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: "); + td::RefInt256 amount = td::string_to_int256(query.amount_); + if (amount.is_null()) { + promise.set_error(td::Status::Error("Invalid amount")); + return; + } + td::actor::send_closure( + provider_, &StorageProvider::send_coins, address, amount, std::move(query.message_), + promise.wrap([](td::Unit) { return create_serialize_tl_object(); })); + } + + void run_control_query(ton_api::storage_daemon_closeStorageContract &query, td::Promise promise) { + if (provider_.empty()) { + promise.set_error(td::Status::Error("No storage provider")); + return; + } + TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: "); + td::actor::send_closure(provider_, 
&StorageProvider::close_storage_contract, address, promise.wrap([](td::Unit) { + return create_serialize_tl_object(); + })); + } + + private: + static void fill_torrent_info_short(Torrent &torrent, ton_api::storage_daemon_torrent &obj) { + obj.hash_ = torrent.get_hash(); + obj.root_dir_ = torrent.get_root_dir(); + if (torrent.inited_info()) { + const Torrent::Info &info = torrent.get_info(); + obj.flags_ = 1; + if (torrent.inited_header()) { + obj.flags_ |= 2; + } + obj.total_size_ = info.file_size; + obj.description_ = info.description; + if (torrent.inited_header()) { + obj.included_size_ = torrent.get_included_size(); + obj.files_count_ = torrent.get_files_count().unwrap(); + obj.dir_name_ = torrent.get_header().dir_name; + } + obj.downloaded_size_ = torrent.get_included_ready_size(); + obj.completed_ = torrent.is_completed(); + } else { + obj.flags_ = 0; + obj.downloaded_size_ = 0; + obj.completed_ = false; + } + if (torrent.get_fatal_error().is_error()) { + obj.flags_ |= 4; + obj.fatal_error_ = torrent.get_fatal_error().message().str(); + } + } + + static void fill_torrent_info_full(Torrent &torrent, ton_api::storage_daemon_torrentFull &obj) { + if (!obj.torrent_) { + obj.torrent_ = create_tl_object(); + } + fill_torrent_info_short(torrent, *obj.torrent_); + obj.files_.clear(); + auto count = torrent.get_files_count(); + if (!count) { + return; + } + for (size_t i = 0; i < count.value(); ++i) { + auto file = create_tl_object(); + file->name_ = torrent.get_file_name(i).str(); + file->size_ = torrent.get_file_size(i); + file->downloaded_size_ = torrent.get_file_ready_size(i); + obj.files_.push_back(std::move(file)); + } + } + + static void get_torrent_info_short(td::actor::ActorId manager, td::Bits256 hash, + td::Promise> promise) { + td::actor::send_closure(manager, &StorageManager::with_torrent, hash, + [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_result(R.move_as_error()); + return; + } + auto state = 
R.move_as_ok(); + auto obj = create_tl_object(); + fill_torrent_info_short(state.torrent, *obj); + obj->active_download_ = state.active_download; + obj->download_speed_ = state.download_speed; + obj->upload_speed_ = state.upload_speed; + promise.set_result(std::move(obj)); + }); + } + + static void get_torrent_info_full_serialized(td::actor::ActorId manager, td::Bits256 hash, + td::Promise promise) { + td::actor::send_closure(manager, &StorageManager::with_torrent, hash, + [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_error(R.move_as_error()); + } else { + auto state = R.move_as_ok(); + auto obj = create_tl_object(); + fill_torrent_info_full(state.torrent, *obj); + obj->torrent_->active_download_ = state.active_download; + obj->torrent_->download_speed_ = state.download_speed; + obj->torrent_->upload_speed_ = state.upload_speed; + for (size_t i = 0; i < obj->files_.size(); ++i) { + obj->files_[i]->priority_ = + (i < state.file_priority.size() ? 
state.file_priority[i] : 1); + } + promise.set_result(serialize_tl_object(obj, true)); + } + }); + } + + td::IPAddress ip_addr_; + bool client_mode_; + std::string global_config_; + std::string db_root_; + td::uint16 control_port_; + bool enable_storage_provider_; + + tl_object_ptr daemon_config_; + std::shared_ptr dht_config_; + adnl::AdnlNodeIdShort local_id_; + adnl::AdnlNodeIdShort dht_id_; + + td::actor::ActorOwn keyring_; + td::actor::ActorOwn adnl_network_manager_; + td::actor::ActorOwn adnl_; + td::actor::ActorOwn dht_; + td::actor::ActorOwn rldp_; + td::actor::ActorOwn overlays_; + td::actor::ActorOwn ext_server_; + + td::actor::ActorOwn manager_; + + td::actor::ActorOwn tonlib_client_; + td::actor::ActorOwn provider_; + td::optional deploying_provider_; + + void init_tonlib_client() { + if (!tonlib_client_.empty()) { + return; + } + auto r_conf_data = td::read_file(global_config_); + r_conf_data.ensure(); + auto tonlib_options = tonlib_api::make_object( + tonlib_api::make_object(r_conf_data.move_as_ok().as_slice().str(), "", false, false), + tonlib_api::make_object()); + tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); + } + + std::string daemon_config_file() { + return db_root_ + "/config.json"; + } +}; + +int main(int argc, char *argv[]) { + SET_VERBOSITY_LEVEL(verbosity_WARNING); + td::set_default_failure_signal_handler().ensure(); + + td::unique_ptr logger_; + SCOPE_EXIT { + td::log_interface = td::default_log_interface; + }; + + td::IPAddress ip_addr; + bool client_mode = false; + std::string global_config, db_root; + td::uint16 control_port = 0; + bool enable_storage_provider = false; + + td::OptionParser p; + p.set_description("Server for seeding and downloading bags of files (torrents)\n"); + p.add_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) { + int v = VERBOSITY_NAME(FATAL) + (td::to_integer(arg)); + SET_VERBOSITY_LEVEL(v); + }); + p.add_option('V', "version", "shows storage-daemon build 
information", [&]() { + std::cout << "storage-daemon build information: [ Commit: " << GitMetadata::CommitSHA1() + << ", Date: " << GitMetadata::CommitDate() << "]\n"; + std::exit(0); + }); + p.add_option('h', "help", "prints a help message", [&]() { + char b[10240]; + td::StringBuilder sb(td::MutableSlice{b, 10000}); + sb << p; + std::cout << sb.as_cslice().c_str(); + std::exit(2); + }); + p.add_checked_option('I', "ip", "set : for adnl. : for client mode", + [&](td::Slice arg) -> td::Status { + if (ip_addr.is_valid()) { + return td::Status::Error("Duplicate ip address"); + } + if (!arg.empty() && arg[0] == ':') { + TRY_RESULT(port, td::to_integer_safe(arg.substr(1))); + TRY_STATUS(ip_addr.init_ipv4_port("127.0.0.1", port)); + client_mode = true; + } else { + TRY_STATUS(ip_addr.init_host_port(arg.str())); + } + return td::Status::OK(); + }); + p.add_checked_option('p', "control-port", "port for control interface", [&](td::Slice arg) -> td::Status { + TRY_RESULT_ASSIGN(control_port, td::to_integer_safe(arg)); + return td::Status::OK(); + }); + p.add_option('C', "global-config", "global TON configuration file", + [&](td::Slice arg) { global_config = arg.str(); }); + p.add_option('D', "db", "db root", [&](td::Slice arg) { db_root = arg.str(); }); + p.add_option('d', "daemonize", "set SIGHUP", [&]() { + td::set_signal_handler(td::SignalType::HangUp, [](int sig) { +#if TD_DARWIN || TD_LINUX + close(0); + setsid(); +#endif + }).ensure(); + }); + p.add_option('l', "logname", "log to file", [&](td::Slice fname) { + logger_ = td::FileLog::create(fname.str()).move_as_ok(); + td::log_interface = logger_.get(); + }); + p.add_option('P', "storage-provider", "run storage provider", [&]() { enable_storage_provider = true; }); + + td::actor::Scheduler scheduler({7}); + + scheduler.run_in_context([&] { + p.run(argc, argv).ensure(); + td::actor::create_actor("storage-daemon", ip_addr, client_mode, global_config, db_root, control_port, + enable_storage_provider) + .release(); + }); 
+ while (scheduler.run(1)) { + } +} diff --git a/storage/test/storage.cpp b/storage/test/storage.cpp index 7bdd03de..b4f67b9b 100644 --- a/storage/test/storage.cpp +++ b/storage/test/storage.cpp @@ -58,7 +58,6 @@ #include "Bitset.h" #include "PeerState.h" -#include "SharedState.h" #include "Torrent.h" #include "TorrentCreator.h" @@ -778,7 +777,7 @@ TEST(Rldp, Main) { RldpBasicTest::run(Options::create(1, 100 * MegaByte, NetChannel::Options::perfect_net())); } - +/* TEST(MerkleTree, Manual) { td::Random::Xorshift128plus rnd(123); // create big random file @@ -803,7 +802,7 @@ TEST(MerkleTree, Manual) { timer = {}; LOG(INFO) << "Init merkle tree"; size_t i = 0; - ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Chunk{i++, x}; })); + ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Piece{i++, x}; })); LOG(INFO) << timer; auto root_proof = tree.gen_proof(0, chunks_count - 1).move_as_ok(); @@ -830,15 +829,15 @@ TEST(MerkleTree, Manual) { other_new_tree.add_proof(tree.gen_proof(i, i + stride - 1).move_as_ok()).ensure(); other_new_tree.gen_proof(i, i + stride - 1).ensure(); other_new_tree.get_root(2); - std::vector chunks; + std::vector chunks; for (size_t j = 0; j < stride && i + j < chunks_count; j++) { chunks.push_back({i + j, hashes.at(i + j)}); } - new_tree.try_add_chunks(chunks).ensure(); + new_tree.try_add_pieces(chunks).ensure(); } if (stride == 1) { - std::vector chunks; + std::vector chunks; for (size_t i = 0; i < chunks_count; i++) { if (rnd.fast(0, 1) == 1) { @@ -848,7 +847,7 @@ TEST(MerkleTree, Manual) { } } td::Bitset bitmask; - other_new_tree.add_chunks(chunks, bitmask); + other_new_tree.add_pieces(chunks, bitmask); for (size_t i = 0; i < chunks_count; i++) { auto expected = chunks[i].hash == hashes[i]; auto got = bitmask.get(i); @@ -874,9 +873,9 @@ TEST(MerkleTree, Stress) { } } size_t i = 0; - ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Chunk{i++, x}; 
})); + ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Piece{i++, x}; })); for (int t2 = 0; t2 < 1000; t2++) { - std::vector chunks; + std::vector chunks; int mask = rnd.fast(0, (1 << chunks_count) - 1); for (size_t i = 0; i < chunks_count; i++) { @@ -889,8 +888,8 @@ TEST(MerkleTree, Stress) { td::Bitset bitmask_strict; td::Bitset bitmask; ton::MerkleTree new_tree(chunks_count, tree.get_root(rnd.fast(1, 5))); - tree.add_chunks(chunks, bitmask_strict); - new_tree.add_chunks(chunks, bitmask); + tree.add_pieces(chunks, bitmask_strict); + new_tree.add_pieces(chunks, bitmask); for (size_t i = 0; i < chunks_count; i++) { auto expected = chunks[i].hash == hashes[i]; auto strict_got = bitmask_strict.get(i); @@ -901,7 +900,7 @@ TEST(MerkleTree, Stress) { } } } -}; +};*/ struct TorrentMetas { td::optional torrent; @@ -985,6 +984,7 @@ TEST(Torrent, Meta) { torrent_file.header = {}; torrent_file.root_proof = {}; auto new_torrent = ton::Torrent::open(options, torrent_file).move_as_ok(); + new_torrent.enable_write_to_files(); std::vector order; for (size_t i = 0; i < torrent.get_info().pieces_count(); i++) { @@ -1039,6 +1039,7 @@ TEST(Torrent, OneFile) { ton::Torrent::Options options; options.root_dir = "second/"; auto other_torrent = ton::Torrent::open(options, meta).move_as_ok(); + other_torrent.enable_write_to_files(); CHECK(!other_torrent.is_completed()); other_torrent.add_piece(0, torrent.get_piece_data(0).move_as_ok(), torrent.get_piece_proof(0).move_as_ok()) .ensure(); @@ -1190,17 +1191,12 @@ TEST(Torrent, Peer) { } }; - class PeerCreator : public ton::NodeActor::Callback { + class PeerCreator : public ton::NodeActor::NodeCallback { public: - PeerCreator(td::actor::ActorId peer_manager, ton::PeerId self_id, std::vector peers, - std::shared_ptr stop_watcher, std::shared_ptr complete_watcher) - : peer_manager_(std::move(peer_manager)) - , peers_(std::move(peers)) - , self_id_(self_id) - , stop_watcher_(stop_watcher) - , 
complete_watcher_(complete_watcher) { + PeerCreator(td::actor::ActorId peer_manager, ton::PeerId self_id, std::vector peers) + : peer_manager_(std::move(peer_manager)), peers_(std::move(peers)), self_id_(self_id) { } - void get_peers(td::Promise> promise) override { + void get_peers(ton::PeerId src, td::Promise> promise) override { auto peers = peers_; promise.set_value(std::move(peers)); } @@ -1209,7 +1205,7 @@ TEST(Torrent, Peer) { send_closure(peer_manager_, &PeerManager::register_node, self_id_, self_); } td::actor::ActorOwn create_peer(ton::PeerId self_id, ton::PeerId peer_id, - td::SharedState state) override { + std::shared_ptr state) override { class PeerCallback : public ton::PeerActor::Callback { public: PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId peer_manager) @@ -1254,6 +1250,19 @@ TEST(Torrent, Peer) { std::move(state)); } + private: + td::actor::ActorId peer_manager_; + std::vector peers_; + ton::PeerId self_id_; + td::actor::ActorId self_; + }; + + class TorrentCallback : public ton::NodeActor::Callback { + public: + TorrentCallback(std::shared_ptr stop_watcher, std::shared_ptr complete_watcher) + : stop_watcher_(stop_watcher), complete_watcher_(complete_watcher) { + } + void on_completed() override { complete_watcher_.reset(); } @@ -1265,12 +1274,8 @@ TEST(Torrent, Peer) { } private: - td::actor::ActorId peer_manager_; - std::vector peers_; - ton::PeerId self_id_; std::shared_ptr stop_watcher_; std::shared_ptr complete_watcher_; - td::actor::ActorId self_; }; size_t peers_n = 20; @@ -1314,12 +1319,11 @@ TEST(Torrent, Peer) { alarm_timestamp() = td::Timestamp::in(1); } void alarm() override { - send_closure(node_actor_, &ton::NodeActor::with_torrent, [](td::Result r_torrent) { - if (r_torrent.is_error()) { + send_closure(node_actor_, &ton::NodeActor::with_torrent, [](td::Result r_state) { + if (r_state.is_error()) { return; } - auto torrent = r_torrent.move_as_ok(); - print_debug(torrent); + 
print_debug(&r_state.ok().torrent); }); alarm_timestamp() = td::Timestamp::in(4); } @@ -1337,14 +1341,17 @@ TEST(Torrent, Peer) { auto peer_manager = td::actor::create_actor("PeerManager"); guard->push_back(td::actor::create_actor( "Node#1", 1, std::move(torrent), - td::make_unique(peer_manager.get(), 1, gen_peers(1, 2), stop_watcher, complete_watcher))); + td::make_unique(stop_watcher, complete_watcher), + td::make_unique(peer_manager.get(), 1, gen_peers(1, 2)), nullptr)); for (size_t i = 2; i <= peers_n; i++) { ton::Torrent::Options options; options.in_memory = true; auto other_torrent = ton::Torrent::open(options, ton::TorrentMeta(info)).move_as_ok(); auto node_actor = td::actor::create_actor( PSLICE() << "Node#" << i, i, std::move(other_torrent), - td::make_unique(peer_manager.get(), i, gen_peers(i, 2), stop_watcher, complete_watcher)); + td::make_unique(stop_watcher, complete_watcher), + td::make_unique(peer_manager.get(), i, gen_peers(i, 2)), + nullptr); if (i == 3) { td::actor::create_actor("StatsActor", node_actor.get()).release(); diff --git a/tl/generate/scheme/ton_api.tl b/tl/generate/scheme/ton_api.tl index 8b5a49f7..99f79da9 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -701,8 +701,9 @@ storage.ok = Ok; storage.state will_upload:Bool want_download:Bool = storage.State; storage.piece proof:bytes data:bytes = storage.Piece; +storage.torrentInfo data:bytes = storage.TorrentInfo; -storage.updateInit have_pieces:bytes state:storage.State = storage.Update; +storage.updateInit have_pieces:bytes have_pieces_offset:int state:storage.State = storage.Update; storage.updateHavePieces piece_id:(vector int) = storage.Update; storage.updateState state:storage.State = storage.Update; @@ -711,10 +712,9 @@ storage.updateState state:storage.State = storage.Update; storage.ping session_id:long = storage.Pong; storage.addUpdate session_id:long seqno:int update:storage.Update = Ok; +storage.getTorrentInfo = storage.TorrentInfo; 
storage.getPiece piece_id:int = storage.Piece; -storage.queryPrefix id:int256 = Object; - ---types--- http.header name:string value:string = http.Header; @@ -745,3 +745,116 @@ validatorSession.statsRound timestamp:long producers:(vector validatorSession.st validatorSession.stats id:tonNode.blockId timestamp:long self:int256 creator:int256 total_validators:int total_weight:long signatures:int signatures_weight:long approve_signatures:int approve_signatures_weight:long first_round:int rounds:(vector validatorSession.statsRound) = validatorSession.Stats; + +---functions--- + +---types--- + +storage.db.key.torrentList = storage.db.key.TorrentList; +storage.db.key.torrent hash:int256 = storage.db.key.TorrentShort; +storage.db.key.torrentMeta hash:int256 = storage.db.key.TorrentMeta; +storage.db.key.priorities hash:int256 = storage.db.key.Priorities; +storage.db.key.piecesInDb hash:int256 = storage.db.key.PiecesInDb; +storage.db.key.pieceInDb hash:int256 idx:long = storage.db.key.PieceInDb; + +storage.db.torrentList torrents:(vector int256) = storage.db.TorrentList; +storage.db.torrent root_dir:string active_download:Bool = storage.db.TorrentShort; +storage.db.priorities actions:(vector storage.PriorityAction) = storage.db.Priorities; +storage.db.piecesInDb pieces:(vector long) = storage.db.PiecesInDb; + +storage.priorityAction.all priority:int = storage.PriorityAction; +storage.priorityAction.idx idx:long priority:int = storage.PriorityAction; +storage.priorityAction.name name:string priority:int = storage.PriorityAction; + +storage.daemon.config server_key:PublicKey cli_key_hash:int256 provider_address:string adnl_id:PublicKey dht_id:PublicKey = storage.daemon.provider.Config; + +storage.daemon.provider.params accept_new_contracts:Bool rate_per_mb_day:string max_span:int + minimal_file_size:long maximal_file_size:long = storage.daemon.provider.Params; + +storage.provider.db.key.state = storage.provider.db.key.State; +storage.provider.db.key.contractList = 
storage.provider.db.key.ContractList; +storage.provider.db.key.storageContract wc:int addr:int256 = storage.provider.db.key.StorageContract; +storage.provider.db.key.microchunkTree wc:int addr:int256 = storage.provider.db.key.MicrochunkTree; +storage.provider.db.key.providerConfig = storage.provider.db.key.ProviderConfig; + +storage.provider.db.state last_processed_lt:long = storage.provider.db.State; +storage.provider.db.contractAddress wc:int addr:int256 = storage.db.ContractAddress; +storage.provider.db.contractList contracts:(vector storage.provider.db.contractAddress) = storage.db.ContractList; +storage.provider.db.storageContract torrent_hash:int256 microchunk_hash:int256 created_time:int state:int file_size:long + rate:string max_span:int = storage.provider.db.StorageContract; +storage.provider.db.microchunkTree data:bytes = storage.provider.db.MicrochunkTree; + +storage.daemon.queryError message:string = storage.daemon.QueryError; +storage.daemon.success = storage.daemon.Success; +storage.daemon.torrent + hash:int256 flags:# + // 0 - info ready + // 1 - header ready + // 2 - fatal error + total_size:flags.0?long description:flags.0?string + files_count:flags.1?long included_size:flags.1?long dir_name:flags.1?string + downloaded_size:long + root_dir:string active_download:Bool completed:Bool + download_speed:double upload_speed:double + fatal_error:flags.2?string + = storage.daemon.Torrent; +storage.daemon.fileInfo + name:string size:long + priority:int + downloaded_size:long + = storage.daemon.FileInfo; +storage.daemon.torrentFull torrent:storage.daemon.torrent files:(vector storage.daemon.fileInfo) = storage.daemon.TorrentFull; +storage.daemon.torrentList torrents:(vector storage.daemon.torrent) = storage.daemon.TorrentList; +storage.daemon.torrentMeta meta:bytes = storage.daemon.TorrentMeta; + +storage.daemon.newContractParams rate:string max_span:int = storage.daemon.NewContractParams; +storage.daemon.newContractParamsAuto provider_address:string = 
storage.daemon.NewContractParams; +storage.daemon.newContractMessage body:bytes rate:string max_span:int = storage.daemon.NewContractMessage; + +storage.daemon.peer adnl_id:int256 ip_str:string download_speed:double upload_speed:double ready_parts:long = storage.daemon.Peer; +storage.daemon.peerList peers:(vector storage.daemon.peer) download_speed:double upload_speed:double total_parts:long = storage.daemon.PeerList; + +storage.daemon.prioritySet = storage.daemon.SetPriorityStatus; +storage.daemon.priorityPending = storage.daemon.SetPriorityStatus; + +storage.daemon.keyHash key_hash:int256 = storage.daemon.KeyHash; + +storage.daemon.providerConfig max_contracts:int max_total_size:long = storage.daemon.ProviderConfig; +storage.daemon.contractInfo address:string state:int torrent:int256 created_time:int file_size:long downloaded_size:long + rate:string max_span:int client_balance:string contract_balance:string = storage.daemon.ContractInfo; +storage.daemon.providerInfo address:string balance:string config:storage.daemon.providerConfig + contracts_count:int contracts_total_size:long + contracts:(vector storage.daemon.contractInfo) = storage.daemon.ProviderInfo; +storage.daemon.providerAddress address:string = storage.daemon.ProviderAddress; + +---functions--- +storage.daemon.setVerbosity verbosity:int = storage.daemon.Success; +storage.daemon.createTorrent path:string description:string = storage.daemon.TorrentFull; +storage.daemon.addByHash hash:int256 root_dir:string start_download:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull; +storage.daemon.addByMeta meta:bytes root_dir:string start_download:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull; +storage.daemon.setActiveDownload hash:int256 active:Bool = storage.daemon.Success; + +storage.daemon.getTorrents = storage.daemon.TorrentList; +storage.daemon.getTorrentFull hash:int256 = storage.daemon.TorrentFull; +storage.daemon.getTorrentMeta hash:int256 = 
storage.daemon.TorrentMeta; +storage.daemon.getNewContractMessage hash:int256 query_id:long params:storage.daemon.NewContractParams = storage.daemon.NewContractMessage; +storage.daemon.getTorrentPeers hash:int256 = storage.daemon.PeerList; + +storage.daemon.setFilePriorityAll hash:int256 priority:int = storage.daemon.SetPriorityStatus; +storage.daemon.setFilePriorityByIdx hash:int256 idx:long priority:int = storage.daemon.SetPriorityStatus; +storage.daemon.setFilePriorityByName hash:int256 name:string priority:int = storage.daemon.SetPriorityStatus; + +storage.daemon.removeTorrent hash:int256 remove_files:Bool = storage.daemon.Success; +storage.daemon.loadFrom hash:int256 meta:bytes path:string = storage.daemon.Torrent; + +storage.daemon.importPrivateKey key:PrivateKey = storage.daemon.KeyHash; +storage.daemon.initProvider account_address:string = storage.daemon.Success; +storage.daemon.deployProvider = storage.daemon.ProviderAddress; +storage.daemon.getProviderParams address:string = storage.daemon.provider.Params; +storage.daemon.setProviderParams params:storage.daemon.provider.params = storage.daemon.Success; +storage.daemon.getProviderInfo with_balances:Bool with_contracts:Bool = storage.daemon.ProviderInfo; +storage.daemon.setProviderConfig config:storage.daemon.providerConfig = storage.daemon.Success; +storage.daemon.withdraw contract:string = storage.daemon.Success; +storage.daemon.sendCoins address:string amount:string message:string = storage.daemon.Success; +storage.daemon.closeStorageContract address:string = storage.daemon.Success; +storage.daemon.removeStorageProvider = storage.daemon.Success; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index b9350346..b299fb5a 100644 Binary files a/tl/generate/scheme/ton_api.tlo and b/tl/generate/scheme/ton_api.tlo differ diff --git a/tl/generate/scheme/tonlib_api.tl b/tl/generate/scheme/tonlib_api.tl index 79185d5b..553ae4f2 100644 --- a/tl/generate/scheme/tonlib_api.tl +++ 
b/tl/generate/scheme/tonlib_api.tl @@ -291,7 +291,7 @@ query.estimateFees id:int53 ignore_chksig:Bool = query.Fees; query.getInfo id:int53 = query.Info; smc.load account_address:accountAddress = smc.Info; -//smc.forget id:int53 = Ok; +smc.forget id:int53 = Ok; smc.getCode id:int53 = tvm.Cell; smc.getData id:int53 = tvm.Cell; smc.getState id:int53 = tvm.Cell; diff --git a/tl/generate/scheme/tonlib_api.tlo b/tl/generate/scheme/tonlib_api.tlo index ee457609..db7810e3 100644 Binary files a/tl/generate/scheme/tonlib_api.tlo and b/tl/generate/scheme/tonlib_api.tlo differ diff --git a/tonlib/CMakeLists.txt b/tonlib/CMakeLists.txt index c74b24db..f27e00f2 100644 --- a/tonlib/CMakeLists.txt +++ b/tonlib/CMakeLists.txt @@ -17,6 +17,7 @@ set(TONLIB_SOURCE tonlib/LastConfig.cpp tonlib/Logging.cpp tonlib/TonlibClient.cpp + tonlib/TonlibClientWrapper.cpp tonlib/utils.cpp tonlib/Client.h @@ -32,6 +33,7 @@ set(TONLIB_SOURCE tonlib/Logging.h tonlib/TonlibCallback.h tonlib/TonlibClient.h + tonlib/TonlibClientWrapper.h tonlib/utils.h tonlib/keys/bip39.cpp diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 3f29eeaa..9dc42c28 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -2533,7 +2533,7 @@ struct ToRawTransactions { auto body_hash = body_cell->get_hash().as_slice().str(); td::Ref init_state_cell; - auto& init_state_cs = message.init.write(); + auto& init_state_cs = message.init.write(); if (init_state_cs.fetch_ulong(1) == 1) { if (init_state_cs.fetch_long(1) == 0) { init_state_cell = vm::CellBuilder().append_cellslice(init_state_cs).finalize(); @@ -2542,7 +2542,7 @@ struct ToRawTransactions { } } - auto get_data = [body = std::move(body), body_cell = std::move(body_cell), + auto get_data = [body = std::move(body), body_cell = std::move(body_cell), init_state_cell = std::move(init_state_cell), this](td::Slice salt) mutable { tonlib_api::object_ptr data; if (try_decode_messages_ && body->size() >= 32 && 
static_cast(body->prefetch_long(32)) <= 1) { @@ -2731,7 +2731,7 @@ td::Status TonlibClient::do_request(const tonlib_api::raw_sendMessageReturnHash& td::Promise>&& promise) { TRY_RESULT_PREFIX(body, vm::std_boc_deserialize(request.body_), TonlibError::InvalidBagOfCells("body")); auto hash = body->get_hash().as_slice().str(); - make_request(int_api::SendMessage{std::move(body)}, + make_request(int_api::SendMessage{std::move(body)}, promise.wrap([hash = std::move(hash)](auto res) { return tonlib_api::make_object(std::move(hash)); })); @@ -2859,7 +2859,7 @@ td::Status TonlibClient::do_request(tonlib_api::raw_getTransactionsV2& request, auto actor_id = actor_id_++; actors_[actor_id] = td::actor::create_actor( - "GetTransactionHistory", client_.get_client(), account_address, lt, hash, count, actor_shared(this, actor_id), + "GetTransactionHistory", client_.get_client(), account_address, lt, hash, count, actor_shared(this, actor_id), promise.wrap([private_key = std::move(private_key), try_decode_messages = request.try_decode_messages_](auto&& x) mutable { return ToRawTransactions(std::move(private_key), try_decode_messages).to_raw_transactions(std::move(x)); })); @@ -3710,6 +3710,17 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_load& request, return td::Status::OK(); } +td::Status TonlibClient::do_request(const tonlib_api::smc_forget& request, + td::Promise>&& promise) { + auto it = smcs_.find(request.id_); + if (it == smcs_.end()) { + return TonlibError::InvalidSmcId(); + } + smcs_.erase(it); + promise.set_value(tonlib_api::make_object()); + return td::Status::OK(); +} + td::Status TonlibClient::do_request(const tonlib_api::smc_getCode& request, td::Promise>&& promise) { auto it = smcs_.find(request.id_); @@ -3890,7 +3901,7 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_getLibraries& request, return td::Status::OK(); } - client_.send_query(ton::lite_api::liteServer_getLibraries(std::move(not_cached_hashes)), + 
client_.send_query(ton::lite_api::liteServer_getLibraries(std::move(not_cached_hashes)), promise.wrap([self=this, result_entries = std::move(result_entries)] (td::Result> r_libraries) mutable { @@ -4632,7 +4643,7 @@ td::Status TonlibClient::do_request(const tonlib_api::getConfigParam& request, std::vector params = { param }; client_.send_query(ton::lite_api::liteServer_getConfigParams(0, std::move(lite_block), std::move(params)), - promise.wrap([block, param](auto r_config) { + promise.wrap([block, param](auto r_config) { auto state = block::check_extract_state_proof(block, r_config->state_proof_.as_slice(), r_config->config_proof_.as_slice()); if (state.is_error()) { @@ -4716,7 +4727,7 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_lookupBlock& reques auto to_tonlib_api(const ton::lite_api::liteServer_transactionId& txid) -> tonlib_api_ptr { - return tonlib_api::make_object( + return tonlib_api::make_object( txid.mode_, txid.account_.as_slice().str(), txid.lt_, txid.hash_.as_slice().str()); } diff --git a/tonlib/tonlib/TonlibClient.h b/tonlib/tonlib/TonlibClient.h index e7819290..0bb7aadc 100644 --- a/tonlib/tonlib/TonlibClient.h +++ b/tonlib/tonlib/TonlibClient.h @@ -305,6 +305,7 @@ class TonlibClient : public td::actor::Actor { td::Result> get_smc_info(td::int64 id); void finish_load_smc(td::unique_ptr query, td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_load& request, td::Promise>&& promise); + td::Status do_request(const tonlib_api::smc_forget& request, td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_getCode& request, td::Promise>&& promise); td::Status do_request(const tonlib_api::smc_getData& request, diff --git a/rldp-http-proxy/TonlibClient.cpp b/tonlib/tonlib/TonlibClientWrapper.cpp similarity index 66% rename from rldp-http-proxy/TonlibClient.cpp rename to tonlib/tonlib/TonlibClientWrapper.cpp index a8e18b81..8dff1874 100644 --- a/rldp-http-proxy/TonlibClient.cpp +++ 
b/tonlib/tonlib/TonlibClientWrapper.cpp @@ -23,44 +23,40 @@ exception statement from your version. If you delete this exception statement from all source files in the program, then also delete it here. */ -#include "TonlibClient.h" +#include "TonlibClientWrapper.h" -TonlibClient::TonlibClient(ton::tl_object_ptr options) : options_(std::move(options)) { +namespace tonlib { + +TonlibClientWrapper::TonlibClientWrapper(ton::tl_object_ptr options) + : options_(std::move(options)) { } -void TonlibClient::start_up() { +void TonlibClientWrapper::start_up() { class Cb : public tonlib::TonlibCallback { public: - explicit Cb(td::actor::ActorId self_id) : self_id_(self_id) { + explicit Cb(td::actor::ActorId self_id) : self_id_(self_id) { } void on_result(std::uint64_t id, tonlib_api::object_ptr result) override { - td::actor::send_closure(self_id_, &TonlibClient::receive_request_result, id, std::move(result)); + td::actor::send_closure(self_id_, &TonlibClientWrapper::receive_request_result, id, std::move(result)); } void on_error(std::uint64_t id, tonlib_api::object_ptr error) override { - td::actor::send_closure(self_id_, &TonlibClient::receive_request_result, id, + td::actor::send_closure(self_id_, &TonlibClientWrapper::receive_request_result, id, td::Status::Error(error->code_, std::move(error->message_))); } private: - td::actor::ActorId self_id_; + td::actor::ActorId self_id_; }; tonlib_client_ = td::actor::create_actor("tonlibclient", td::make_unique(actor_id(this))); auto init = tonlib_api::make_object(std::move(options_)); - auto P = td::PromiseCreator::lambda([](td::Result> R) mutable { - R.ensure(); - }); + auto P = td::PromiseCreator::lambda( + [](td::Result> R) mutable { R.ensure(); }); send_request(std::move(init), std::move(P)); } -void TonlibClient::send_request(tonlib_api::object_ptr obj, - td::Promise> promise) { - auto id = next_request_id_++; - CHECK(requests_.emplace(id, std::move(promise)).second); - td::actor::send_closure(tonlib_client_, 
&tonlib::TonlibClient::request, id, std::move(obj)); -} - -void TonlibClient::receive_request_result(td::uint64 id, td::Result> R) { +void TonlibClientWrapper::receive_request_result(td::uint64 id, + td::Result> R) { if (id == 0) { return; } @@ -69,4 +65,6 @@ void TonlibClient::receive_request_result(td::uint64 id, td::Resultsecond); requests_.erase(it); promise.set_result(std::move(R)); -} \ No newline at end of file +} + +} // namespace tonlib \ No newline at end of file diff --git a/rldp-http-proxy/TonlibClient.h b/tonlib/tonlib/TonlibClientWrapper.h similarity index 68% rename from rldp-http-proxy/TonlibClient.h rename to tonlib/tonlib/TonlibClientWrapper.h index 0b75c6c0..041b25de 100644 --- a/rldp-http-proxy/TonlibClient.h +++ b/tonlib/tonlib/TonlibClientWrapper.h @@ -28,14 +28,26 @@ #include "auto/tl/tonlib_api.hpp" #include "tonlib/tonlib/TonlibClient.h" -class TonlibClient : public td::actor::Actor { +namespace tonlib { + +class TonlibClientWrapper : public td::actor::Actor { public: - explicit TonlibClient(ton::tl_object_ptr options); + explicit TonlibClientWrapper(ton::tl_object_ptr options); void start_up() override; - void send_request(tonlib_api::object_ptr obj, - td::Promise> promise); + template + void send_request(tonlib_api::object_ptr obj, td::Promise promise) { + auto id = next_request_id_++; + auto P = promise.wrap([](tonlib_api::object_ptr x) -> td::Result { + if (x->get_id() != F::ReturnType::element_type::ID) { + return td::Status::Error("Invalid response from tonlib"); + } + return ton::move_tl_object_as(std::move(x)); + }); + CHECK(requests_.emplace(id, std::move(P)).second); + td::actor::send_closure(tonlib_client_, &tonlib::TonlibClient::request, id, std::move(obj)); + } private: void receive_request_result(td::uint64 id, td::Result> R); @@ -45,3 +57,5 @@ class TonlibClient : public td::actor::Actor { std::map>> requests_; td::uint64 next_request_id_{1}; }; + +} // namespace tonlib