
TON Storage utilities (#564)

* Rename chunk to piece in MerkleTree for consistency

* Refactor PeerManager

* Make PeerState thread-safe

* Download torrent by hash

* First version of storage daemon

* Download torrents partially

* Improve storing and loading torrent state in DB

* Rewrite MerkleTree

* "Remove torrent" in storage daemon

* Process errors, fix bugs in storage

* Move TonlibClientWrapper from rldp-http-proxy to tonlib

* Initial version of storage provider

* Move interaction with contracts to smc-util

* Improve TonlibClientWrapper interface

* Various improvements in storage provider

* Fix TorrentCreator.cpp

* Improve interface for partial download

* Client mode in storage-daemon

* Improve interface of storage-daemon-cli

* Fix calculating speed, show peers in storage-daemon

* Use permanent adnl id in storage daemon

* Fix sending large "storage.addUpdate" messages

* Improve printing torrents in cli

* Update tlo

* Fix RldpSender::on_ack

* Update storage provider

* Add "address" parameter to get-provider-params

* Allow client to close storage contract

* Limit torrent description

* Add more logs to storage provider

* smc.forget tonlib method

* Use smc.forget in storage daemon

* Optimize sending messages in smc-util.cpp

* Fix verbosity, remove excessive logs

* Json output in storage-daemon-cli

* Update storage provider contracts

* Fix rldp2 acks

* Change verbosity of logs in rldp2

* Update help and output of commands in storage-daemon-cli

Co-authored-by: SpyCheese <mikle98@yandex.ru>
EmelyanenkoK authored and GitHub committed on 2022-12-22 12:24:13 +03:00
parent 434dc487a4
commit 360ef54e6b
75 changed files with 8872 additions and 1148 deletions

View file

@ -90,12 +90,20 @@ void OverlayManager::delete_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdSho
}
void OverlayManager::create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope) {
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope) {
create_public_overlay_ex(local_id, std::move(overlay_id), std::move(callback), std::move(rules), std::move(scope),
true);
}
void OverlayManager::create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope, bool announce_self) {
CHECK(!dht_node_.empty());
auto id = overlay_id.compute_short_id();
register_overlay(local_id, id,
Overlay::create(keyring_, adnl_, actor_id(this), dht_node_, local_id, std::move(overlay_id),
std::move(callback), std::move(rules), scope));
std::move(callback), std::move(rules), scope, announce_self));
}
void OverlayManager::create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,

View file

@ -52,6 +52,9 @@ class OverlayManager : public Overlays {
void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope) override;
void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope,
bool announce_self) override;
void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Callback> callback,
OverlayPrivacyRules rules) override;

View file

@ -171,7 +171,9 @@ void OverlayImpl::receive_random_peers(adnl::AdnlNodeIdShort src, td::Result<td:
void OverlayImpl::send_random_peers_cont(adnl::AdnlNodeIdShort src, OverlayNode node,
td::Promise<td::BufferSlice> promise) {
std::vector<tl_object_ptr<ton_api::overlay_node>> vec;
vec.emplace_back(node.tl());
if (announce_self_) {
vec.emplace_back(node.tl());
}
for (td::uint32 i = 0; i < nodes_to_send(); i++) {
auto P = get_random_peer(true);

View file

@ -37,10 +37,10 @@ td::actor::ActorOwn<Overlay> Overlay::create(td::actor::ActorId<keyring::Keyring
td::actor::ActorId<OverlayManager> manager,
td::actor::ActorId<dht::Dht> dht_node, adnl::AdnlNodeIdShort local_id,
OverlayIdFull overlay_id, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope) {
OverlayPrivacyRules rules, td::string scope, bool announce_self) {
auto R = td::actor::create_actor<OverlayImpl>("overlay", keyring, adnl, manager, dht_node, local_id,
std::move(overlay_id), true, std::vector<adnl::AdnlNodeIdShort>(),
std::move(callback), std::move(rules), scope);
std::move(callback), std::move(rules), scope, announce_self);
return td::actor::ActorOwn<Overlay>(std::move(R));
}
@ -60,7 +60,7 @@ OverlayImpl::OverlayImpl(td::actor::ActorId<keyring::Keyring> keyring, td::actor
td::actor::ActorId<OverlayManager> manager, td::actor::ActorId<dht::Dht> dht_node,
adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope)
OverlayPrivacyRules rules, td::string scope, bool announce_self)
: keyring_(keyring)
, adnl_(adnl)
, manager_(manager)
@ -70,7 +70,8 @@ OverlayImpl::OverlayImpl(td::actor::ActorId<keyring::Keyring> keyring, td::actor
, callback_(std::move(callback))
, public_(pub)
, rules_(std::move(rules))
, scope_(scope) {
, scope_(scope)
, announce_self_(announce_self) {
overlay_id_ = id_full_.compute_short_id();
VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? "public" : "private");
@ -328,6 +329,10 @@ void OverlayImpl::receive_dht_nodes(td::Result<dht::DhtValue> res, bool dummy) {
VLOG(OVERLAY_NOTICE) << this << ": can not get value from DHT: " << res.move_as_error();
}
if (!announce_self_) {
return;
}
VLOG(OVERLAY_INFO) << this << ": adding self node to DHT overlay's nodes";
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), oid = print_id()](td::Result<OverlayNode> R) {
if (R.is_error()) {

View file

@ -42,7 +42,7 @@ class Overlay : public td::actor::Actor {
td::actor::ActorId<OverlayManager> manager,
td::actor::ActorId<dht::Dht> dht_node, adnl::AdnlNodeIdShort local_id,
OverlayIdFull overlay_id, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope);
OverlayPrivacyRules rules, td::string scope, bool announce_self = true);
static td::actor::ActorOwn<Overlay> create(td::actor::ActorId<keyring::Keyring> keyring,
td::actor::ActorId<adnl::Adnl> adnl,
td::actor::ActorId<OverlayManager> manager,

View file

@ -124,7 +124,7 @@ class OverlayImpl : public Overlay {
td::actor::ActorId<OverlayManager> manager, td::actor::ActorId<dht::Dht> dht_node,
adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Overlays::Callback> callback,
OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }");
OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool announce_self = true);
void update_dht_node(td::actor::ActorId<dht::Dht> dht) override {
dht_node_ = dht;
}
@ -366,6 +366,7 @@ class OverlayImpl : public Overlay {
bool semi_public_ = false;
OverlayPrivacyRules rules_;
td::string scope_;
bool announce_self_ = true;
std::map<PublicKeyHash, std::shared_ptr<Certificate>> certs_;
class CachedEncryptor : public td::ListNode {

View file

@ -193,7 +193,11 @@ class Overlays : public td::actor::Actor {
virtual void update_dht_node(td::actor::ActorId<dht::Dht> dht) = 0;
virtual void create_public_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules, td::string scope) = 0;
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope) = 0;
virtual void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::unique_ptr<Callback> callback, OverlayPrivacyRules rules,
td::string scope, bool announce_self) = 0;
virtual void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id,
std::vector<adnl::AdnlNodeIdShort> nodes, std::unique_ptr<Callback> callback,
OverlayPrivacyRules rules) = 0;
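
The create_public_overlay_ex entry point added above is presumably what enables the storage daemon's client mode: joining a torrent overlay to download without advertising itself as a source. A minimal caller-side sketch, assuming an ActorId<Overlays> named overlays_ and already-prepared local_id_, overlay_id, callback and rules; all of these names, and the scope string, are illustrative, not taken from this commit:

td::actor::send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay_ex, local_id_,
                        std::move(overlay_id), std::move(callback), std::move(rules),
                        R"({ "type": "storage" })", false /* announce_self: do not add self to DHT */);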

View file

@ -1,5 +1,5 @@
cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)
add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h TonlibClient.h TonlibClient.cpp DNSResolver.cpp)
add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h DNSResolver.cpp)
target_include_directories(rldp-http-proxy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
target_link_libraries(rldp-http-proxy PRIVATE tonhttp rldp dht tonlib git)

View file

@ -30,7 +30,8 @@
static const double CACHE_TIMEOUT_HARD = 300.0;
static const double CACHE_TIMEOUT_SOFT = 270.0;
DNSResolver::DNSResolver(td::actor::ActorId<TonlibClient> tonlib_client) : tonlib_client_(std::move(tonlib_client)) {
DNSResolver::DNSResolver(td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client)
: tonlib_client_(std::move(tonlib_client)) {
}
void DNSResolver::start_up() {
@ -39,14 +40,15 @@ void DNSResolver::start_up() {
void DNSResolver::sync() {
auto obj = tonlib_api::make_object<tonlib_api::sync>();
auto P = td::PromiseCreator::lambda([SelfId =
actor_id(this)](td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) {
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](
td::Result<tonlib_api::object_ptr<tonlib_api::ton_blockIdExt>> R) {
if (R.is_error()) {
LOG(WARNING) << "Sync error: " << R.move_as_error();
ton::delay_action([SelfId]() { td::actor::send_closure(SelfId, &DNSResolver::sync); }, td::Timestamp::in(5.0));
}
});
td::actor::send_closure(tonlib_client_, &TonlibClient::send_request, std::move(obj), std::move(P));
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::sync>, std::move(obj),
std::move(P));
}
void DNSResolver::resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdShort> promise) {
@ -66,18 +68,13 @@ void DNSResolver::resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdSho
td::Bits256 category = td::sha256_bits256(td::Slice("site", 4));
auto obj = tonlib_api::make_object<tonlib_api::dns_resolve>(nullptr, host, category, 16);
auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), promise = std::move(promise), host = std::move(host)](
td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) mutable {
td::Result<tonlib_api::object_ptr<tonlib_api::dns_resolved>> R) mutable {
if (R.is_error()) {
if (promise) {
promise.set_result(R.move_as_error());
}
} else {
auto v = R.move_as_ok();
auto obj = dynamic_cast<tonlib_api::dns_resolved *>(v.get());
if (obj == nullptr) {
promise.set_result(td::Status::Error("invalid response from tonlib"));
return;
}
auto obj = R.move_as_ok();
ton::adnl::AdnlNodeIdShort id;
td::uint32 cnt = 0;
for (auto &e : obj->entries_) {
@ -106,7 +103,8 @@ void DNSResolver::resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdSho
}
}
});
td::actor::send_closure(tonlib_client_, &TonlibClient::send_request, std::move(obj), std::move(P));
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::dns_resolve>,
std::move(obj), std::move(P));
}
void DNSResolver::save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id) {
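
The calls above work because send_request is now a template over the tonlib function type, so each promise receives the concrete answer type instead of a generic tonlib_api::Object that had to be downcast (the old dns_resolved dynamic_cast is gone). A rough sketch of what the wrapper's interface presumably looks like; this is an assumption, not the actual header:

namespace tonlib {
class TonlibClientWrapper : public td::actor::Actor {
 public:
  // ReturnType is assumed to be the per-function result alias generated from the TL
  // scheme (e.g. tonlib_api::dns_resolve -> object_ptr<tonlib_api::dns_resolved>).
  template <typename TFunc>
  void send_request(tonlib_api::object_ptr<TFunc> obj, td::Promise<typename TFunc::ReturnType> promise);
};
}  // namespace tonlib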

View file

@ -25,13 +25,13 @@
*/
#pragma once
#include "td/actor/actor.h"
#include "TonlibClient.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"
#include "adnl/adnl.h"
#include "td/actor/PromiseFuture.h"
class DNSResolver : public td::actor::Actor {
public:
explicit DNSResolver(td::actor::ActorId<TonlibClient> tonlib_client);
explicit DNSResolver(td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client);
void start_up() override;
void resolve(std::string host, td::Promise<ton::adnl::AdnlNodeIdShort> promise);
@ -40,7 +40,7 @@ class DNSResolver : public td::actor::Actor {
void sync();
void save_to_cache(std::string host, ton::adnl::AdnlNodeIdShort id);
td::actor::ActorId<TonlibClient> tonlib_client_;
td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client_;
struct CacheEntry {
ton::adnl::AdnlNodeIdShort id_;

View file

@ -54,7 +54,7 @@
#include "td/utils/BufferedFd.h"
#include "common/delay.h"
#include "TonlibClient.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"
#include "DNSResolver.h"
#if TD_DARWIN || TD_LINUX
@ -919,7 +919,7 @@ class RldpHttpProxy : public td::actor::Actor {
auto tonlib_options = tonlib_api::make_object<tonlib_api::options>(
tonlib_api::make_object<tonlib_api::config>(conf_dataR.move_as_ok().as_slice().str(), "", false, false),
tonlib_api::make_object<tonlib_api::keyStoreTypeInMemory>());
tonlib_client_ = td::actor::create_actor<TonlibClient>("tonlibclient", std::move(tonlib_options));
tonlib_client_ = td::actor::create_actor<tonlib::TonlibClientWrapper>("tonlibclient", std::move(tonlib_options));
dns_resolver_ = td::actor::create_actor<DNSResolver>("dnsresolver", tonlib_client_.get());
}
@ -1315,7 +1315,7 @@ class RldpHttpProxy : public td::actor::Actor {
std::string db_root_ = ".";
bool proxy_all_ = false;
td::actor::ActorOwn<TonlibClient> tonlib_client_;
td::actor::ActorOwn<tonlib::TonlibClientWrapper> tonlib_client_;
td::actor::ActorOwn<DNSResolver> dns_resolver_;
std::map<td::Bits256,

View file

@ -18,6 +18,7 @@
*/
#include "BdwStats.h"
#include "rldp.hpp"
namespace ton {
namespace rldp2 {
@ -39,7 +40,7 @@ void BdwStats::on_packet_ack(const PacketInfo &info, td::Timestamp sent_at, td::
auto ack_passed = now.at() - info.delivered_now.at();
auto passed = td::max(sent_passed, ack_passed);
if (passed < 0.01) {
LOG(ERROR) << "Invalid passed " << passed;
VLOG(RLDP_WARNING) << "Invalid passed " << passed;
}
auto delivered = delivered_count - info.delivered_count;
on_rate_sample((double)delivered / passed, now, info.is_paused);

View file

@ -18,6 +18,7 @@
*/
#include "RldpConnection.h"
#include "rldp.hpp"
#include "td/utils/overloaded.h"
#include "td/utils/Random.h"
@ -83,7 +84,7 @@ td::Timestamp RldpConnection::loop_limits(td::Timestamp now) {
outbound_transfers_.erase(it);
to_on_sent_.emplace_back(limit->transfer_id, std::move(error));
} else {
LOG(ERROR) << "Timeout on unknown transfer " << limit->transfer_id.to_hex();
VLOG(RLDP_WARNING) << "Timeout on unknown transfer " << limit->transfer_id.to_hex();
}
}
limits_set_.erase(*limit);
@ -113,7 +114,7 @@ void RldpConnection::send(TransferId transfer_id, td::BufferSlice data, td::Time
td::Random::secure_bytes(transfer_id.as_slice());
} else {
if (outbound_transfers_.find(transfer_id) != outbound_transfers_.end()) {
LOG(WARNING) << "Skip resend of " << transfer_id.to_hex();
VLOG(RLDP_WARNING) << "Skip resend of " << transfer_id.to_hex();
return;
}
}
@ -143,17 +144,6 @@ void RldpConnection::loop_bbr(td::Timestamp now) {
double speed = bbr_.get_rate();
td::uint32 congestion_window = bbr_.get_window_size();
static td::Timestamp next;
//FIXME: remove this UNSAFE debug output
if (next.is_in_past(now)) {
next = td::Timestamp::in(1, now);
if (td::actor::core::ActorExecuteContext::get()->actor().get_actor_info_ptr()->get_name() == "Alice") {
LOG(ERROR) << "speed=" << td::format::as_size((td::int64)speed * 768) << " "
<< "cgw=" << td::format::as_size((td::int64)congestion_window * 768) << " "
<< "loss=" << loss_stats_.loss * 100 << "%";
}
}
pacer_.set_speed(speed);
congestion_window_ = congestion_window;
}
@ -301,7 +291,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) {
max_size = limit_it->max_size;
}
if (total_size > max_size) {
LOG(INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size;
VLOG(RLDP_INFO) << "Drop too big rldp query " << part.total_size_ << " > " << max_size;
return;
}
@ -324,7 +314,7 @@ void RldpConnection::receive_raw_obj(ton::ton_api::rldp2_messagePart &part) {
}
return {};
}
if (in_part->receiver.on_received(part.seqno_, td::Timestamp::now())) {
if (in_part->receiver.on_received(part.seqno_ + 1, td::Timestamp::now())) {
TRY_STATUS_PREFIX(in_part->decoder->add_symbol({static_cast<td::uint32>(part.seqno_), std::move(part.data_)}),
td::Status::Error(ErrorCode::protoviolation, "invalid symbol"));
if (in_part->decoder->may_try_decode()) {

View file

@ -33,9 +33,8 @@ void RldpReceiver::on_ack_sent(td::Timestamp now) {
//LOG(ERROR) << "RESEND ACK " << cnt_;
}
cnt_++;
if (cnt_ > 7) {
send_ack_at_ = {};
} else {
send_ack_at_ = {};
if (cnt_ <= 7) {
send_ack_at_.relax(td::Timestamp::at(now.at() + config_.ack_delay * (1 << cnt_)));
}
}
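
After this change send_ack_at_ is always cleared first and only re-armed for the first seven repeats, which gives an exponential backoff on ack resends. A small worked sketch of the resulting schedule; the concrete ack_delay value below is purely illustrative, the real one comes from config_:

double ack_delay = 0.010;  // seconds, illustrative only
double total_wait = 0.0;
for (int cnt = 1; cnt <= 7; ++cnt) {
  total_wait += ack_delay * (1 << cnt);  // gaps of 0.02, 0.04, ..., 1.28 s between resends
}
// total_wait == 2.54: after roughly 2.5 s of unanswered resends no further ack is scheduled.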

View file

@ -49,7 +49,6 @@ SenderPackets::Update RldpSender::on_ack(const Ack &ack, double ack_delay, td::T
//LOG(ERROR) << "ON ACK " << ack.max_seqno << " " << ack.received_mask << " " << ack.received_count;
auto update = packets_.on_ack(ack);
if (!update.was_max_updated) {
CHECK(!update.new_received);
return update;
}

View file

@ -18,17 +18,18 @@
*/
#include "RttStats.h"
#include "rldp.hpp"
#include <cmath>
namespace ton {
namespace rldp2 {
void RttStats::on_rtt_sample(double rtt_sample, double ack_delay, td::Timestamp now) {
if (rtt_sample < 0.001 || rtt_sample > 10) {
LOG(WARNING) << "Suspicious rtt sample " << rtt_sample;
VLOG(RLDP_WARNING) << "Suspicious rtt sample " << rtt_sample;
return;
}
if (ack_delay < -1e-9 || ack_delay > 10) {
LOG(WARNING) << "Suspicious ack_delay " << ack_delay;
VLOG(RLDP_WARNING) << "Suspicious ack_delay " << ack_delay;
return;
}
rtt_sample = td::max(0.01, rtt_sample);

View file

@ -53,13 +53,28 @@ struct Bitset {
}
auto mask = 1 << bit_i;
if ((bits_[i] & mask) == 0) {
bits_[i] |= mask;
bits_[i] |= (char)mask;
count_++;
return true;
}
return false;
}
bool set_zero(size_t offset) {
auto i = offset / 8;
if (i >= bits_.size()) {
return false;
}
auto bit_i = offset % 8;
auto mask = 1 << bit_i;
if (bits_[i] & mask) {
bits_[i] &= (char)~mask;
count_--;
return true;
}
return false;
}
size_t ones_count() const {
return count_;
}
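
set_zero is the counterpart of set_one and is used elsewhere in this change (for example in PartsHelper::on_self_part_not_ready) when a previously available piece disappears. A minimal usage sketch: set_one, get and ones_count appear in other parts of this PR, and the assumption that set_one grows the underlying buffer on demand is not shown in this hunk.

td::Bitset ready;
ready.set_one(5);                                // mark piece 5 as present
CHECK(ready.get(5) && ready.ones_count() == 1);
ready.set_zero(5);                               // new in this commit: unmark it again
CHECK(!ready.get(5) && ready.ones_count() == 0);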

View file

@ -17,6 +17,7 @@ set(STORAGE_SOURCE
TorrentInfo.cpp
TorrentMeta.cpp
db.h
Bitset.h
LoadSpeed.h
MerkleTree.h
@ -24,32 +25,33 @@ set(STORAGE_SOURCE
PartsHelper.h
PeerActor.h
PeerState.h
SharedState.h
Torrent.h
TorrentCreator.h
TorrentHeader.h
TorrentInfo.h
TorrentMeta.h
)
PeerManager.h
MicrochunkTree.h MicrochunkTree.cpp)
set(STORAGE_CLI_SOURCE
storage-cli.cpp
)
add_library(storage ${STORAGE_SOURCE})
target_link_libraries(storage tdutils tdactor tddb ton_crypto tl_api ${JEMALLOC_LIBRARIES})
target_include_directories(storage PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
target_include_directories(storage PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
)
add_executable(storage-cli ${STORAGE_CLI_SOURCE})
target_link_libraries(storage-cli storage overlay tdutils tdactor adnl tl_api dht
rldp rldp2 catchain validatorsession full-node validator ton_validator validator
fift-lib memprof terminal git ${JEMALLOC_LIBRARIES})
rldp rldp2 fift-lib memprof terminal git ${JEMALLOC_LIBRARIES})
set(STORAGE_TEST_SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/test/storage.cpp
PARENT_SCOPE
)
add_subdirectory(storage-daemon)
# Do not install it yet
#install(TARGETS storage-cli RUNTIME DESTINATION bin)

View file

@ -29,7 +29,7 @@ void LoadSpeed::add(std::size_t size, td::Timestamp now) {
}
double LoadSpeed::speed(td::Timestamp now) const {
update(now);
return total_size_ / duration();
return (double)total_size_ / duration(now);
}
td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) {
@ -37,15 +37,15 @@ td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) {
}
void LoadSpeed::update(td::Timestamp now) const {
while (duration() > 60) {
while (duration(now) > 30) {
total_size_ -= events_.front().size;
events_.pop();
}
}
double LoadSpeed::duration() const {
double LoadSpeed::duration(td::Timestamp now) const {
double res = 5;
if (events_.size() > 1) {
res = std::max(res, events_.back().at.at() - events_.front().at.at());
if (!events_.empty()) {
res = std::max(res, now.at() - events_.front().at.at());
}
return res;
}

View file

@ -26,19 +26,19 @@
namespace ton {
class LoadSpeed {
public:
void add(std::size_t size, td::Timestamp now);
void add(td::uint64 size, td::Timestamp now = td::Timestamp::now());
double speed(td::Timestamp now = td::Timestamp::now()) const;
friend td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed);
private:
struct Event {
std::size_t size;
td::uint64 size;
td::Timestamp at;
};
mutable td::VectorQueue<Event> events_;
mutable std::size_t total_size_{0};
mutable td::uint64 total_size_{0};
double duration() const;
double duration(td::Timestamp now) const;
void update(td::Timestamp now) const;
};
} // namespace ton
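
The reworked LoadSpeed keeps a sliding window of (size, timestamp) events: update() drops events older than 30 seconds, and duration() is measured from the oldest remaining event to now, clamped to at least 5 seconds. A small sketch of how calling code presumably uses it; the byte counts and times are illustrative:

ton::LoadSpeed download_speed;
auto t0 = td::Timestamp::now();
download_speed.add(1 << 20, t0);                                 // 1 MiB at t0
download_speed.add(1 << 20, td::Timestamp::at(t0.at() + 10.0));  // 1 MiB ten seconds later
// duration = max(5 s, 10 s) = 10 s, so this reports about 2 MiB / 10 s;
// events older than 30 s would have been dropped before the division.
double bytes_per_second = download_speed.speed(td::Timestamp::at(t0.at() + 10.0));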

View file

@ -28,152 +28,51 @@
#include "vm/excno.hpp"
namespace ton {
static td::Ref<vm::Cell> unpack_proof(td::Ref<vm::Cell> root) {
static td::Result<td::Ref<vm::Cell>> unpack_proof(td::Ref<vm::Cell> root) {
vm::CellSlice cs(vm::NoVm(), root);
CHECK(cs.special_type() == vm::Cell::SpecialType::MerkleProof);
if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) {
return td::Status::Error("Not a merkle proof");
}
return cs.fetch_ref();
}
td::uint32 MerkleTree::get_depth() const {
return log_n_;
}
td::Ref<vm::Cell> MerkleTree::get_root(size_t depth_limit) const {
if (depth_limit > log_n_ || root_proof_.is_null()) {
return root_proof_;
MerkleTree::MerkleTree(size_t pieces_count, td::Bits256 root_hash)
: pieces_count_(pieces_count), root_hash_(root_hash) {
depth_ = 0;
n_ = 1;
while (n_ < pieces_count_) {
++depth_;
n_ <<= 1;
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_), depth_limit);
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
void MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) const {
if (depth_limit == 0) {
return;
static td::Ref<vm::Cell> build_tree(td::Bits256 *hashes, size_t len) {
if (len == 1) {
return vm::CellBuilder().store_bytes(hashes[0].as_slice()).finalize();
}
// check if it is possible to load node without breaking virtualization
vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw));
if (cs_raw.is_special()) {
return;
td::Ref<vm::Cell> l = build_tree(hashes, len / 2);
td::Ref<vm::Cell> r = build_tree(hashes + len / 2, len / 2);
return vm::CellBuilder().store_ref(l).store_ref(r).finalize();
};
MerkleTree::MerkleTree(std::vector<td::Bits256> hashes) : pieces_count_(hashes.size()) {
depth_ = 0;
n_ = 1;
while (n_ < pieces_count_) {
++depth_;
n_ <<= 1;
}
hashes.resize(n_, td::Bits256::zero());
td::Ref<vm::Cell> root = build_tree(hashes.data(), n_);
root_hash_ = root->get_hash().bits();
root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root));
}
static td::Status do_validate_proof(td::Ref<vm::Cell> node, size_t depth) {
if (node->get_depth(0) != depth) {
return td::Status::Error("Depth mismatch");
}
vm::CellSlice cs(vm::NoVm(), std::move(node));
while (cs.have_refs()) {
do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1);
}
}
td::Bits256 MerkleTree::get_root_hash() const {
CHECK(root_hash_);
return root_hash_.value();
}
MerkleTree::MerkleTree(size_t chunks_count, td::Bits256 root_hash) {
init_begin(chunks_count);
root_hash_ = root_hash;
init_finish();
}
MerkleTree::MerkleTree(size_t chunks_count, td::Ref<vm::Cell> root_proof) {
init_begin(chunks_count);
root_hash_ = unpack_proof(root_proof)->get_hash(0).as_array();
root_proof_ = std::move(root_proof);
init_finish();
}
MerkleTree::MerkleTree(td::Span<Chunk> chunks) {
init_begin(chunks.size());
for (size_t i = 0; i < chunks.size(); i++) {
CHECK(chunks[i].index == i);
init_add_chunk(i, chunks[i].hash.as_slice());
}
init_finish();
}
void MerkleTree::init_begin(size_t chunks_count) {
log_n_ = 0;
while ((size_t(1) << log_n_) < chunks_count) {
log_n_++;
}
n_ = size_t(1) << log_n_;
total_blocks_ = chunks_count;
mark_.resize(n_ * 2);
proof_.resize(n_ * 2);
td::UInt256 null{};
auto cell = vm::CellBuilder().store_bytes(null.as_slice()).finalize();
for (auto i = chunks_count; i < n_; i++) {
proof_[i + n_] = cell;
}
}
void MerkleTree::init_add_chunk(size_t index, td::Slice hash) {
CHECK(index < total_blocks_);
CHECK(proof_[index + n_].is_null());
proof_[index + n_] = vm::CellBuilder().store_bytes(hash).finalize();
}
void MerkleTree::init_finish() {
for (size_t i = n_ - 1; i >= 1; i--) {
auto j = i * 2;
if (proof_[j].is_null()) {
continue;
}
if (i + 1 < n_ && proof_[i + 1].not_null() && proof_[j]->get_hash() == proof_[j + 2]->get_hash() &&
proof_[j + 1]->get_hash() == proof_[j + 3]->get_hash()) {
// minor optimization for same chunks
proof_[i] = proof_[i + 1];
} else {
proof_[i] = vm::CellBuilder().store_ref(proof_[j]).store_ref(proof_[j + 1]).finalize();
}
}
if (proof_[1].not_null()) {
init_proof();
}
CHECK(root_hash_);
}
void MerkleTree::remove_chunk(std::size_t index) {
CHECK(index < n_);
index += n_;
while (proof_[index].not_null()) {
proof_[index] = {};
index /= 2;
}
}
bool MerkleTree::has_chunk(std::size_t index) const {
CHECK(index < n_);
index += n_;
return proof_[index].not_null();
}
void MerkleTree::add_chunk(std::size_t index, td::Slice hash) {
CHECK(hash.size() == 32);
CHECK(index < n_);
index += n_;
auto cell = vm::CellBuilder().store_bytes(hash).finalize();
CHECK(proof_[index].is_null());
proof_[index] = std::move(cell);
mark_[index] = mark_id_;
for (index /= 2; index != 0; index /= 2) {
CHECK(proof_[index].is_null());
auto &left = proof_[index * 2];
auto &right = proof_[index * 2 + 1];
if (left.not_null() && right.not_null()) {
proof_[index] = vm::CellBuilder().store_ref(left).store_ref(right).finalize();
mark_[index] = mark_id_;
}
}
}
static td::Status do_validate(td::Ref<vm::Cell> ref, size_t depth) {
vm::CellSlice cs(vm::NoVm(), std::move(ref));
if (cs.is_special()) {
if (cs.special_type() != vm::Cell::SpecialType::PrunnedBranch) {
return td::Status::Error("Unexpected special cell");
@ -194,154 +93,65 @@ static td::Status do_validate(td::Ref<vm::Cell> ref, size_t depth) {
if (cs.size_refs() != 2) {
return td::Status::Error("Node in proof must have two refs");
}
TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1));
TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1));
TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1));
TRY_STATUS(do_validate_proof(cs.fetch_ref(), depth - 1));
}
return td::Status::OK();
}
td::Status MerkleTree::validate_proof(td::Ref<vm::Cell> new_root) {
// 1. depth <= log_n
// 2. each non special node has two refs and nothing else
// 3. each list contains only hash
// 4. all special nodes are merkle proofs
vm::CellSlice cs(vm::NoVm(), new_root);
if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) {
return td::Status::Error("Proof must be a mekle proof cell");
td::Status MerkleTree::add_proof(td::Ref<vm::Cell> proof) {
if (proof.is_null()) {
return td::Status::OK();
}
auto root = cs.fetch_ref();
if (root_hash_ && root->get_hash(0).as_slice() != root_hash_.value().as_slice()) {
return td::Status::Error("Proof has invalid root hash");
TRY_RESULT(proof_raw, unpack_proof(proof));
if (root_hash_ != proof_raw->get_hash(0).bits()) {
return td::Status::Error("Root hash mismatch");
}
return do_validate(std::move(root), log_n_);
}
td::Status MerkleTree::add_proof(td::Ref<vm::Cell> new_root) {
CHECK(root_proof_.not_null() || root_hash_);
TRY_STATUS(validate_proof(new_root));
if (root_proof_.not_null()) {
auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(new_root));
TRY_STATUS(do_validate_proof(proof_raw, depth_));
if (root_proof_.is_null()) {
root_proof_ = std::move(proof);
} else {
auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(proof));
if (combined.is_null()) {
return td::Status::Error("Can't combine proofs");
}
root_proof_ = std::move(combined);
} else {
root_proof_ = std::move(new_root);
}
return td::Status::OK();
}
td::Status MerkleTree::validate_existing_chunk(const Chunk &chunk) {
vm::CellSlice cs(vm::NoVm(), proof_[chunk.index + n_]);
CHECK(cs.size() == chunk.hash.size());
if (cs.as_bitslice().compare(chunk.hash.cbits()) != 0) {
return td::Status::Error("Hash mismatch");
td::Result<td::Bits256> MerkleTree::get_piece_hash(size_t idx) const {
if (idx >= n_) {
return td::Status::Error("Index is too big");
}
return td::Status::OK();
}
td::Status MerkleTree::try_add_chunks(td::Span<Chunk> chunks) {
td::Bitset bitmask;
add_chunks(chunks, bitmask);
for (size_t i = 0; i < chunks.size(); i++) {
if (!bitmask.get(i)) {
return td::Status::Error(PSLICE() << "Invalid chunk #" << chunks[i].index);
}
}
return td::Status::OK();
}
void MerkleTree::add_chunks(td::Span<Chunk> chunks, td::Bitset &bitmask) {
if (root_proof_.is_null()) {
return;
return td::Status::Error("Hash is not known");
}
mark_id_++;
bitmask.reserve(chunks.size());
for (size_t i = 0; i < chunks.size(); i++) {
const auto &chunk = chunks[i];
if (has_chunk(chunk.index)) {
if (validate_existing_chunk(chunk).is_ok()) {
bitmask.set_one(i);
}
continue;
size_t l = 0, r = n_ - 1;
td::Ref<vm::Cell> node = unpack_proof(root_proof_).move_as_ok();
while (true) {
vm::CellSlice cs(vm::NoVm(), std::move(node));
if (cs.is_special()) {
return td::Status::Error("Hash is not known");
}
add_chunk(chunk.index, chunk.hash.as_slice());
}
root_proof_ = vm::CellBuilder::create_merkle_proof(merge(unpack_proof(root_proof_), 1));
for (size_t i = 0; i < chunks.size(); i++) {
const auto &chunk = chunks[i];
if (has_chunk(chunk.index) && mark_[chunk.index + n_] == mark_id_) {
bitmask.set_one(i);
if (l == r) {
td::Bits256 hash;
CHECK(cs.fetch_bits_to(hash.bits(), 256));
return hash;
}
}
}
td::Ref<vm::Cell> MerkleTree::merge(td::Ref<vm::Cell> root, size_t index) {
const auto &down = proof_[index];
if (down.not_null()) {
if (down->get_hash() != root->get_hash(0)) {
proof_[index] = {};
CHECK(cs.size_refs() == 2);
size_t mid = (l + r) / 2;
if (idx <= mid) {
node = cs.prefetch_ref(0);
r = mid;
} else {
return down;
node = cs.prefetch_ref(1);
l = mid + 1;
}
}
if (mark_[index] != mark_id_ || index >= n_) {
return root;
}
vm::CellSlice cs(vm::NoVm(), root);
if (cs.is_special()) {
cleanup_add(index);
return root;
}
CHECK(cs.size_refs() == 2);
vm::CellBuilder cb;
cb.store_bits(cs.fetch_bits(cs.size()));
auto left = merge(cs.fetch_ref(), index * 2);
auto right = merge(cs.fetch_ref(), index * 2 + 1);
cb.store_ref(std::move(left)).store_ref(std::move(right));
return cb.finalize();
}
void MerkleTree::cleanup_add(size_t index) {
if (mark_[index] != mark_id_) {
return;
}
proof_[index] = {};
if (index >= n_) {
return;
}
cleanup_add(index * 2);
cleanup_add(index * 2 + 1);
}
void MerkleTree::init_proof() {
CHECK(proof_[1].not_null());
td::Bits256 new_root_hash = proof_[1]->get_hash(0).as_array();
CHECK(!root_hash_ || root_hash_.value() == new_root_hash);
root_hash_ = new_root_hash;
root_proof_ = vm::CellBuilder::create_merkle_proof(proof_[1]);
}
td::Result<td::Ref<vm::Cell>> MerkleTree::gen_proof(size_t l, size_t r) {
if (root_proof_.is_null()) {
return td::Status::Error("got no proofs yet");
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r)));
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
td::Status MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) const {
static td::Status do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) {
if (ir < l || il > r) {
return td::Status::OK();
}
@ -358,4 +168,114 @@ td::Status MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir
TRY_STATUS(do_gen_proof(cs.fetch_ref(), ic + 1, ir, l, r));
return td::Status::OK();
}
td::Result<td::Ref<vm::Cell>> MerkleTree::gen_proof(size_t l, size_t r) const {
if (root_proof_.is_null()) {
return td::Status::Error("Got no proofs yet");
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r)));
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
static void do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) {
if (depth_limit == 0) {
return;
}
// check if it is possible to load node without breaking virtualization
vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw));
if (cs_raw.is_special()) {
return;
}
vm::CellSlice cs(vm::NoVm(), std::move(node));
while (cs.have_refs()) {
do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1);
}
}
td::Ref<vm::Cell> MerkleTree::get_root(size_t depth_limit) const {
if (depth_limit > depth_ || root_proof_.is_null()) {
return root_proof_;
}
auto usage_tree = std::make_shared<vm::CellUsageTree>();
auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_).move_as_ok(), depth_limit);
auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
CHECK(res.not_null());
return res;
}
static td::Ref<vm::Cell> build_from_hashes(std::pair<size_t, td::Bits256> *p, std::pair<size_t, td::Bits256> *pend,
size_t len) {
if (len == 1) {
return vm::CellBuilder().store_bytes((p < pend ? p->second : td::Bits256::zero()).as_slice()).finalize();
}
td::Ref<vm::Cell> l = build_from_hashes(p, pend, len / 2);
td::Ref<vm::Cell> r = build_from_hashes(p + len / 2, pend, len / 2);
return vm::CellBuilder().store_ref(l).store_ref(r).finalize();
}
td::Ref<vm::Cell> MerkleTree::do_add_pieces(td::Ref<vm::Cell> node, std::vector<size_t> &ok_pieces, size_t il,
size_t ir, std::pair<size_t, td::Bits256> *pl,
std::pair<size_t, td::Bits256> *pr) {
if (pl == pr || il >= pieces_count_) {
return node;
}
vm::CellSlice cs;
if (node.is_null() || (cs = vm::CellSlice(vm::NoVm(), node)).is_special() || il + 1 == ir) {
if ((size_t)(pr - pl) != std::min(ir, pieces_count_) - il) {
return node;
}
td::Ref<vm::Cell> new_node = build_from_hashes(pl, pr, ir - il);
td::Bits256 new_hash = new_node->get_hash().bits();
if (new_hash != (node.is_null() ? root_hash_ : node->get_hash(0).bits())) {
return node;
}
for (auto p = pl; p != pr; ++p) {
ok_pieces.push_back(p->first);
}
if (node.is_null() || cs.is_special()) {
node = std::move(new_node);
}
return node;
}
size_t imid = (il + ir) / 2;
auto pmid = pl;
while (pmid != pr && pmid->first < imid) {
++pmid;
}
td::Ref<vm::Cell> l = do_add_pieces(cs.prefetch_ref(0), ok_pieces, il, imid, pl, pmid);
td::Ref<vm::Cell> r = do_add_pieces(cs.prefetch_ref(1), ok_pieces, imid, ir, pmid, pr);
if (l != cs.prefetch_ref(0) || r != cs.prefetch_ref(1)) {
node = vm::CellBuilder().store_ref(l).store_ref(r).finalize();
}
return node;
}
std::vector<size_t> MerkleTree::add_pieces(std::vector<std::pair<size_t, td::Bits256>> pieces) {
if (pieces.empty()) {
return {};
}
std::sort(pieces.begin(), pieces.end());
for (size_t i = 0; i + 1 < pieces.size(); ++i) {
CHECK(pieces[i].first != pieces[i + 1].first);
}
CHECK(pieces.back().first < pieces_count_);
std::vector<size_t> ok_pieces;
td::Ref<vm::Cell> root;
if (!root_proof_.is_null()) {
root = unpack_proof(root_proof_).move_as_ok();
}
root = do_add_pieces(root, ok_pieces, 0, n_, pieces.data(), pieces.data() + pieces.size());
if (!root.is_null()) {
root_proof_ = vm::CellBuilder::create_merkle_proof(std::move(root));
}
return ok_pieces;
}
} // namespace ton

View file

@ -24,6 +24,7 @@
#include "vm/cells.h"
#include "Bitset.h"
#include <map>
namespace ton {
// merkle_node$_ {n:#} left:^(ton::MerkleTree n) right:^(ton::MerkleTree n) = ton::MerkleTree (n + 1);
@ -31,66 +32,33 @@ namespace ton {
class MerkleTree {
public:
td::uint32 get_depth() const;
td::Ref<vm::Cell> get_root(size_t depth_limit = std::numeric_limits<size_t>::max()) const;
td::Bits256 get_root_hash() const;
MerkleTree(size_t chunks_count, td::Bits256 root_hash);
MerkleTree(size_t chunks_count, td::Ref<vm::Cell> root_proof);
struct Chunk {
std::size_t index{0};
td::Bits256 hash;
};
explicit MerkleTree(td::Span<Chunk> chunks);
MerkleTree() = default;
void init_begin(size_t chunks_count);
void init_add_chunk(std::size_t index, td::Slice hash);
void init_finish();
MerkleTree(size_t pieces_count, td::Bits256 root_hash);
explicit MerkleTree(std::vector<td::Bits256> hashes);
// merge external proof with an existing proof
td::Status add_proof(td::Ref<vm::Cell> new_root);
// generate proof for all chunks from l to r inclusive
td::Result<td::Ref<vm::Cell>> gen_proof(size_t l, size_t r);
td::Status add_proof(td::Ref<vm::Cell> proof);
td::Result<td::Bits256> get_piece_hash(size_t idx) const;
td::Result<td::Ref<vm::Cell>> gen_proof(size_t l, size_t r) const;
td::Ref<vm::Cell> get_root(size_t depth_limit = std::numeric_limits<size_t>::max()) const;
// Trying to add and validate list of chunks simultaneously
td::Status try_add_chunks(td::Span<Chunk> chunks);
std::vector<size_t> add_pieces(std::vector<std::pair<size_t, td::Bits256>> pieces);
// Returns bitmask of successfully added chunks
// Intended to be used during validation of a torrent.
// We got arbitrary chunks read from disk, and we got an arbitrary proof.
// Now we can say about some chunks that they are correct. This is a general way
// to do this.
//
// NB: already added chunks are simply validated. One should be careful
// not to process them twice
void add_chunks(td::Span<Chunk> chunks, td::Bitset &bitmask);
size_t get_depth() const {
return depth_;
}
td::Bits256 get_root_hash() const {
return root_hash_;
}
private:
td::uint64 total_blocks_;
std::size_t n_; // n = 2^log_n
td::uint32 log_n_;
std::size_t mark_id_{0};
std::vector<std::size_t> mark_; // n_ * 2
std::vector<td::Ref<vm::Cell>> proof_; // n_ * 2
td::optional<td::Bits256> root_hash_;
size_t pieces_count_{0};
td::Bits256 root_hash_ = td::Bits256::zero();
size_t depth_{0}, n_{1};
td::Ref<vm::Cell> root_proof_;
td::Status validate_proof(td::Ref<vm::Cell> new_root);
bool has_chunk(std::size_t index) const;
void remove_chunk(std::size_t index);
void add_chunk(std::size_t index, td::Slice hash);
void init_proof();
td::Ref<vm::Cell> merge(td::Ref<vm::Cell> root, size_t index);
void cleanup_add(size_t index);
td::Status do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) const;
void do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) const;
td::Status validate_existing_chunk(const Chunk &chunk);
td::Ref<vm::Cell> do_add_pieces(td::Ref<vm::Cell> node, std::vector<size_t> &ok_pieces, size_t il, size_t ir,
std::pair<size_t, td::Bits256> *pl, std::pair<size_t, td::Bits256> *pr);
};
} // namespace ton
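
Taken together, the rewritten interface splits the two roles cleanly: the creator builds the full tree from piece hashes, while a downloader starts from the root hash alone and then merges peer-supplied proofs and pieces. A usage sketch based only on the declarations above; compute_piece_hashes is a hypothetical helper, and error handling is reduced to ensure/move_as_ok:

std::vector<td::Bits256> piece_hashes = compute_piece_hashes();   // hypothetical helper
ton::MerkleTree creator_tree(piece_hashes);                       // seeder side: full tree

// Downloader side: knows only the root hash and the number of pieces.
ton::MerkleTree tree(piece_hashes.size(), creator_tree.get_root_hash());
auto proof = creator_tree.gen_proof(0, piece_hashes.size() - 1).move_as_ok();
tree.add_proof(std::move(proof)).ensure();

// Pieces are accepted only if their hashes match the proof; add_pieces returns the good indices.
std::vector<std::pair<size_t, td::Bits256>> received = {{0, piece_hashes[0]}, {1, piece_hashes[1]}};
std::vector<size_t> ok_indices = tree.add_pieces(std::move(received));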

storage/MicrochunkTree.cpp (new file, 214 lines)
View file

@ -0,0 +1,214 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "MicrochunkTree.h"
#include "Torrent.h"
#include "vm/cells/CellSlice.h"
#include "vm/cells/MerkleProof.h"
namespace ton {
static td::Ref<vm::Cell> prun(const td::Ref<vm::Cell> &node) {
vm::CellBuilder cb;
cb.store_long(static_cast<td::uint8>(vm::Cell::SpecialType::PrunnedBranch), 8);
cb.store_long(1, 8);
cb.store_bytes(node->get_hash(0).as_slice());
cb.store_long(node->get_depth(0), 16);
return cb.finalize(true);
}
MicrochunkTree::Builder::Builder(td::uint64 file_size, td::uint64 prun_size)
: file_size_(file_size), prun_size_(prun_size) {
total_size_ = MICROCHUNK_SIZE;
while (total_size_ < file_size) {
total_size_ *= 2;
}
}
void MicrochunkTree::Builder::add_data(td::Slice s) {
CHECK(cur_size_ + s.size() <= file_size_);
while (s.size() > 0) {
size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE;
size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr;
if (buf_remaining > s.size()) {
memcpy(cur_microchunk_ + buf_ptr, s.data(), s.size());
cur_size_ += s.size();
return;
}
memcpy(cur_microchunk_ + buf_ptr, s.data(), buf_remaining);
cur_size_ += buf_remaining;
s.remove_prefix(buf_remaining);
add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE));
}
}
MicrochunkTree MicrochunkTree::Builder::finalize() {
CHECK(cur_size_ == file_size_);
if (cur_size_ % MICROCHUNK_SIZE != 0) {
size_t buf_ptr = cur_size_ % MICROCHUNK_SIZE;
size_t buf_remaining = MICROCHUNK_SIZE - buf_ptr;
memset(cur_microchunk_ + buf_ptr, 0, buf_remaining);
cur_size_ += buf_remaining;
add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE));
}
memset(cur_microchunk_, 0, MICROCHUNK_SIZE);
while (cur_size_ < total_size_) {
add_microchunk(td::Slice(cur_microchunk_, MICROCHUNK_SIZE));
cur_size_ += MICROCHUNK_SIZE;
}
CHECK(proof_.size() == 1);
MicrochunkTree tree(vm::CellBuilder::create_merkle_proof(std::move(proof_[0])));
CHECK(tree.total_size_ == total_size_);
return tree;
}
void MicrochunkTree::Builder::add_microchunk(td::Slice s) {
CHECK(s.size() == MICROCHUNK_SIZE);
td::Ref<vm::Cell> node = vm::CellBuilder().store_zeroes(2).store_bytes(s).finalize_novm();
while (!proof_.empty() && proof_.back()->get_depth(0) == node->get_depth(0)) {
td::Ref<vm::Cell> left = std::move(proof_.back());
proof_.pop_back();
node = vm::CellBuilder().store_zeroes(2).store_ref(std::move(left)).store_ref(std::move(node)).finalize_novm();
if ((MICROCHUNK_SIZE << node->get_depth(0)) <= prun_size_) {
node = prun(node);
}
}
proof_.push_back(std::move(node));
}
MicrochunkTree::MicrochunkTree(td::Ref<vm::Cell> root_proof) : root_proof_(root_proof) {
td::Ref<vm::Cell> virt_root = vm::MerkleProof::virtualize(root_proof_, 1);
CHECK(!virt_root.is_null());
CHECK(virt_root->get_depth() <= 50);
total_size_ = MICROCHUNK_SIZE << virt_root->get_depth();
root_hash_ = virt_root->get_hash().bits();
}
class GetMicrochunkProof {
public:
GetMicrochunkProof(td::uint64 l, td::uint64 r, Torrent &torrent) : l(l), r(r), torrent(torrent) {
}
td::Result<td::Ref<vm::Cell>> unprun(td::uint64 il, td::uint64 ir) {
if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) {
TRY_RESULT(data, get_microchunk(il));
return vm::CellBuilder().store_zeroes(2).store_bytes(data).finalize_novm();
}
td::uint64 imid = (il + ir) / 2;
TRY_RESULT(node_l, unprun(il, imid));
TRY_RESULT(node_r, unprun(imid, ir));
td::Ref<vm::Cell> node =
vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm();
if (l >= ir || il >= r) {
node = prun(node);
}
return node;
}
td::Result<td::Ref<vm::Cell>> unprun(const td::Ref<vm::Cell> &node, td::uint64 il, td::uint64 ir) {
vm::CellSlice cs(vm::NoVm(), node);
if (!cs.is_special()) {
return node;
}
TRY_RESULT(result, unprun(il, ir));
if (result->get_hash(0) != node->get_hash(0)) {
return td::Status::Error("Hash mismatch");
}
return result;
}
td::Result<td::Ref<vm::Cell>> get_proof(td::Ref<vm::Cell> node, td::uint64 il, td::uint64 ir) {
if (l >= ir || il >= r) {
return prun(node);
}
if (ir - il == MicrochunkTree::MICROCHUNK_SIZE) {
return unprun(node, il, ir);
}
if (l <= il && ir <= r) {
return prun(node);
}
td::uint64 imid = (il + ir) / 2;
TRY_RESULT_ASSIGN(node, unprun(node, il, ir));
vm::CellSlice cs(vm::NoVm(), node);
if (cs.size_ext() != 2 + (2 << 16)) {
return td::Status::Error("Invalid node in microchunk tree");
}
TRY_RESULT(node_l, get_proof(cs.prefetch_ref(0), il, imid));
TRY_RESULT(node_r, get_proof(cs.prefetch_ref(1), imid, ir));
return vm::CellBuilder().store_zeroes(2).store_ref(std::move(node_l)).store_ref(std::move(node_r)).finalize_novm();
}
private:
td::uint64 l, r;
Torrent &torrent;
td::uint64 cache_offset = 0;
std::string cache;
td::Result<td::Slice> get_microchunk(td::uint64 l) {
DCHECK(l % MicrochunkTree::MICROCHUNK_SIZE == 0);
td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE;
if (!(cache_offset <= l && r <= cache_offset + cache.size())) {
td::uint64 piece_size = torrent.get_info().piece_size;
td::uint64 piece_i = l / piece_size;
if (piece_i < torrent.get_info().pieces_count()) {
TRY_RESULT(piece, torrent.get_piece_data(piece_i));
piece.resize(piece_size, '\0');
cache = std::move(piece);
} else {
cache = std::string(piece_size, '\0');
}
cache_offset = piece_i * piece_size;
}
return td::Slice{cache.data() + (l - cache_offset), MicrochunkTree::MICROCHUNK_SIZE};
}
};
td::Result<td::Ref<vm::Cell>> MicrochunkTree::get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const {
if (root_proof_.is_null()) {
return td::Status::Error("Empty microchunk tree");
}
if (l % MICROCHUNK_SIZE != 0 || r % MICROCHUNK_SIZE != 0 || l >= r || r > total_size_) {
return td::Status::Error("Invalid range");
}
if (!torrent.inited_info()) {
return td::Status::Error("Torrent info is not ready");
}
if (torrent.get_info().piece_size % MICROCHUNK_SIZE != 0) {
return td::Status::Error("Invalid piece size in torrent");
}
td::Ref<vm::Cell> root_raw = vm::CellSlice(vm::NoVm(), root_proof_).prefetch_ref();
TRY_RESULT(result, GetMicrochunkProof(l, r, torrent).get_proof(std::move(root_raw), 0, total_size_));
return vm::CellBuilder::create_merkle_proof(std::move(result));
}
td::Result<MicrochunkTree> MicrochunkTree::Builder::build_for_torrent(Torrent &torrent, td::uint64 prun_size) {
if (!torrent.inited_info()) {
return td::Status::Error("Torrent info is not available");
}
const TorrentInfo &info = torrent.get_info();
Builder builder(info.file_size, prun_size);
td::uint64 pieces_count = info.pieces_count();
for (td::uint64 i = 0; i < pieces_count; ++i) {
TRY_RESULT(piece, torrent.get_piece_data(i));
builder.add_data(piece);
}
MicrochunkTree tree = builder.finalize();
return tree;
}
} // namespace ton

storage/MicrochunkTree.h (new file, 74 lines)
View file

@ -0,0 +1,74 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "td/utils/optional.h"
#include "td/utils/Slice.h"
#include "vm/cells.h"
#include "Bitset.h"
#include <map>
namespace ton {
class Torrent;
class MicrochunkTree {
public:
static const size_t MICROCHUNK_SIZE = 64;
class Builder {
public:
explicit Builder(td::uint64 file_size, td::uint64 prun_size = 1 << 17);
void add_data(td::Slice s);
MicrochunkTree finalize();
static td::Result<MicrochunkTree> build_for_torrent(Torrent &torrent, td::uint64 prun_size = 1 << 17);
private:
td::uint64 file_size_;
td::uint64 prun_size_;
td::uint64 total_size_;
std::vector<td::Ref<vm::Cell>> proof_;
unsigned char cur_microchunk_[MICROCHUNK_SIZE];
td::uint64 cur_size_ = 0;
void add_microchunk(td::Slice s);
};
MicrochunkTree() = default;
MicrochunkTree(td::Ref<vm::Cell> root_proof);
td::Result<td::Ref<vm::Cell>> get_proof(td::uint64 l, td::uint64 r, Torrent &torrent) const;
td::Ref<vm::Cell> get_root() const {
return root_proof_;
}
td::Bits256 get_root_hash() const {
return root_hash_;
}
td::uint64 get_total_size() const {
return total_size_;
}
private:
td::Bits256 root_hash_ = td::Bits256::zero();
td::uint64 total_size_ = 0;
td::Ref<vm::Cell> root_proof_ = {};
};
} // namespace ton
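
A short usage sketch based on the interface above; torrent stands for an already downloaded ton::Torrent, error handling is reduced to ensure(), and the root hash is presumably what a storage-provider contract later refers to. get_proof then proves possession of a microchunk-aligned byte range:

auto r_tree = ton::MicrochunkTree::Builder::build_for_torrent(torrent);  // default prun_size = 1 << 17
r_tree.ensure();
ton::MicrochunkTree tree = r_tree.move_as_ok();
td::Bits256 root_hash = tree.get_root_hash();

// Prove possession of a byte range; here the whole (padded) file.
auto r_proof = tree.get_proof(0, tree.get_total_size(), torrent);
r_proof.ensure();
td::Ref<vm::Cell> proof = r_proof.move_as_ok();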

File diff suppressed because it is too large

View file

@ -25,42 +25,87 @@
#include "Torrent.h"
#include "td/utils/Random.h"
#include "td/utils/Variant.h"
#include <map>
#include "db.h"
namespace ton {
class NodeActor : public td::actor::Actor {
public:
class NodeCallback {
public:
virtual ~NodeCallback() = default;
virtual td::actor::ActorOwn<PeerActor> create_peer(PeerId self_id, PeerId peer_id,
std::shared_ptr<PeerState> state) = 0;
virtual void get_peers(PeerId src, td::Promise<std::vector<PeerId>> peers) = 0;
virtual void register_self(td::actor::ActorId<ton::NodeActor> self) = 0;
virtual void get_peer_info(PeerId src, PeerId peer, td::Promise<std::pair<td::Bits256, std::string>> promise) {
promise.set_error(td::Status::Error("Not implemented"));
}
};
class Callback {
public:
virtual ~Callback() {
}
virtual td::actor::ActorOwn<PeerActor> create_peer(PeerId self_id, PeerId peer_id,
td::SharedState<PeerState> state) = 0;
virtual void get_peers(td::Promise<std::vector<PeerId>> peers) = 0;
virtual void register_self(td::actor::ActorId<ton::NodeActor> self) = 0;
//TODO: proper callbacks
virtual ~Callback() = default;
virtual void on_completed() = 0;
virtual void on_closed(ton::Torrent torrent) = 0;
};
NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr<Callback> callback, bool should_download = true);
struct PendingSetFilePriority {
struct All {};
td::Variant<All, size_t, std::string> file;
td::uint8 priority;
};
struct DbInitialData {
std::vector<PendingSetFilePriority> priorities;
std::set<td::uint64> pieces_in_db;
};
NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr<Callback> callback,
td::unique_ptr<NodeCallback> node_callback, std::shared_ptr<db::DbType> db, bool should_download = true);
NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr<Callback> callback,
td::unique_ptr<NodeCallback> node_callback, std::shared_ptr<db::DbType> db, bool should_download,
DbInitialData db_initial_data);
void start_peer(PeerId peer_id, td::Promise<td::actor::ActorId<PeerActor>> promise);
ton::Torrent *with_torrent() {
return &torrent_;
struct NodeState {
Torrent &torrent;
bool active_download;
double download_speed;
double upload_speed;
const std::vector<td::uint8> &file_priority;
};
void with_torrent(td::Promise<NodeState> promise) {
promise.set_value(
NodeState{torrent_, should_download_, download_speed_.speed(), upload_speed_.speed(), file_priority_});
}
std::string get_stats_str();
void set_file_priority(size_t i, td::uint8 priority);
void set_should_download(bool should_download);
void set_all_files_priority(td::uint8 priority, td::Promise<bool> promise);
void set_file_priority_by_idx(size_t i, td::uint8 priority, td::Promise<bool> promise);
void set_file_priority_by_name(std::string name, td::uint8 priority, td::Promise<bool> promise);
void load_from(td::optional<TorrentMeta> meta, std::string files_path, td::Promise<td::Unit> promise);
void wait_for_completion(td::Promise<td::Unit> promise);
void get_peers_info(td::Promise<tl_object_ptr<ton_api::storage_daemon_peerList>> promise);
static void load_from_db(std::shared_ptr<db::DbType> db, td::Bits256 hash, td::unique_ptr<Callback> callback,
td::unique_ptr<NodeCallback> node_callback,
td::Promise<td::actor::ActorOwn<NodeActor>> promise);
static void cleanup_db(std::shared_ptr<db::DbType> db, td::Bits256 hash, td::Promise<td::Unit> promise);
private:
PeerId self_id_;
ton::Torrent torrent_;
std::shared_ptr<td::BufferSlice> torrent_info_str_;
std::vector<td::uint8> file_priority_;
td::unique_ptr<Callback> callback_;
td::unique_ptr<NodeCallback> node_callback_;
std::shared_ptr<db::DbType> db_;
bool should_download_{false};
class Notifier : public td::actor::Actor {
@ -80,24 +125,13 @@ class NodeActor : public td::actor::Actor {
struct Peer {
td::actor::ActorOwn<PeerActor> actor;
td::actor::ActorOwn<Notifier> notifier;
td::SharedState<PeerState> state;
std::shared_ptr<PeerState> state;
PartsHelper::PeerToken peer_token;
LoadSpeed download_speed, upload_speed;
};
std::map<PeerId, Peer> peers_;
struct QueryId {
PeerId peer;
PartId part;
auto key() const {
return std::tie(peer, part);
}
bool operator<(const QueryId &other) const {
return key() < other.key();
}
};
struct PartsSet {
struct Info {
td::optional<PeerId> query_to_peer;
@ -110,7 +144,7 @@ class NodeActor : public td::actor::Actor {
PartsSet parts_;
PartsHelper parts_helper_;
std::vector<PartId> ready_parts_;
LoadSpeed download_;
LoadSpeed download_speed_, upload_speed_;
td::Timestamp next_get_peers_at_;
bool has_get_peers_{false};
@ -118,9 +152,22 @@ class NodeActor : public td::actor::Actor {
static constexpr double GET_PEER_EACH = 5;
bool is_completed_{false};
std::vector<td::Promise<td::Unit>> wait_for_completion_;
td::Timestamp will_upload_at_;
std::vector<PendingSetFilePriority> pending_set_file_priority_;
bool header_ready_ = false;
std::map<std::string, size_t> file_name_to_idx_;
std::set<td::uint64> pieces_in_db_;
bool db_store_priorities_paused_ = false;
td::int64 last_stored_meta_count_ = -1;
td::Timestamp next_db_store_meta_at_ = td::Timestamp::now();
void init_torrent();
void init_torrent_header();
void recheck_parts(Torrent::PartsRange range);
void on_signal_from_peer(PeerId peer_id);
void start_up() override;
@ -134,13 +181,23 @@ class NodeActor : public td::actor::Actor {
static constexpr size_t MAX_TOTAL_QUERIES = 20;
static constexpr size_t MAX_PEER_TOTAL_QUERIES = 5;
void loop_queries();
bool try_send_query();
bool try_send_part(PartId part_id);
void loop_get_peers();
void got_peers(td::Result<std::vector<PeerId>> r_peers);
void loop_peer(const PeerId &peer_id, Peer &peer);
void on_part_ready(PartId part_id);
void loop_will_upload();
void got_torrent_info_str(td::BufferSlice data);
void update_pieces_in_db(td::uint64 begin, td::uint64 end);
void db_store_torrent();
void db_store_priorities();
void db_store_torrent_meta();
void after_db_store_torrent_meta(td::Result<td::int64> R);
void db_store_piece(td::uint64 i, std::string s);
void db_erase_piece(td::uint64 i);
void db_update_pieces_list();
};
} // namespace ton
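
with_torrent() is now asynchronous and hands out a NodeState snapshot (torrent, activity flag, speeds, priorities) instead of a raw Torrent pointer. A caller-side sketch; node_actor_ is a hypothetical td::actor::ActorId<ton::NodeActor>:

td::actor::send_closure(
    node_actor_, &ton::NodeActor::with_torrent,
    td::PromiseCreator::lambda([](td::Result<ton::NodeActor::NodeState> R) {
      if (R.is_error()) {
        return;
      }
      auto state = R.move_as_ok();
      LOG(INFO) << "active=" << state.active_download << " down=" << state.download_speed
                << " B/s, up=" << state.upload_speed << " B/s";
    }));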

View file

@ -26,12 +26,16 @@
namespace ton {
struct PartsHelper {
public:
PartsHelper(size_t parts_count) : parts_(parts_count), peers_(64) {
explicit PartsHelper(size_t parts_count = 0) : parts_(parts_count), peers_(64) {
peers_[0].is_valid = true;
}
using PartId = size_t;
using PeerToken = size_t;
void init_parts_count(size_t parts_count) {
CHECK(parts_.empty());
parts_.resize(parts_count);
}
PeerToken register_self() {
return self_token_;
}
@ -123,6 +127,22 @@ struct PartsHelper {
change_key(part_id, part->rnd, part->peers_count, 0, part->priority, part->priority);
}
void on_self_part_not_ready(PartId part_id) {
auto peer = get_peer(self_token_);
if (!peer->ready_parts.set_zero(part_id)) {
return;
}
auto part = get_part(part_id);
CHECK(part->is_ready);
part->is_ready = false;
for (auto &peer : peers_) {
if (peer.ready_parts.get(part_id)) {
peer.want_download_count++;
}
}
change_key(part_id, part->rnd, 0, part->peers_count, part->priority, part->priority);
}
struct RarePart {
PartId part_id;
PeerId peer_id;

View file

@ -38,7 +38,7 @@ ton::ton_api::object_ptr<ton::ton_api::storage_state> to_ton_api(const PeerState
return ton::ton_api::make_object<ton::ton_api::storage_state>(state.will_upload, state.want_download);
}
PeerActor::PeerActor(td::unique_ptr<Callback> callback, td::SharedState<PeerState> state)
PeerActor::PeerActor(td::unique_ptr<Callback> callback, std::shared_ptr<PeerState> state)
: callback_(std::move(callback)), state_(std::move(state)) {
CHECK(callback_);
}
@ -50,7 +50,6 @@ td::uint64 PeerActor::create_and_send_query(ArgsT &&... args) {
td::uint64 PeerActor::send_query(td::BufferSlice query) {
auto query_id = next_query_id_++;
//LOG(ERROR) << "send_query " << to_string(ton::fetch_tl_object<ton::ton_api::Function>(std::move(query), true).ok());
callback_->send_query(query_id, std::move(query));
return query_id;
}
@ -64,8 +63,8 @@ void PeerActor::notify_node() {
}
void PeerActor::execute_query(td::BufferSlice query, td::Promise<td::BufferSlice> promise) {
on_pong();
TRY_RESULT_PROMISE(promise, f, ton::fetch_tl_object<ton::ton_api::Function>(std::move(query), true));
//LOG(ERROR) << "execute_query " << to_string(f);
ton::ton_api::downcast_call(
*f, td::overloaded(
[&](ton::ton_api::storage_ping &ping) {
@ -73,6 +72,7 @@ void PeerActor::execute_query(td::BufferSlice query, td::Promise<td::BufferSlice
},
[&](ton::ton_api::storage_addUpdate &add_update) { execute_add_update(add_update, std::move(promise)); },
[&](ton::ton_api::storage_getPiece &get_piece) { execute_get_piece(get_piece, std::move(promise)); },
[&](ton::ton_api::storage_getTorrentInfo &) { execute_get_torrent_info(std::move(promise)); },
[&](auto &other) { promise.set_error(td::Status::Error("Unknown function")); }));
schedule_loop();
}
@ -85,28 +85,29 @@ void PeerActor::on_ping_result(td::Result<td::BufferSlice> r_answer) {
}
void PeerActor::on_pong() {
wait_pong_till_ = td::Timestamp::in(4);
state_.lock()->peer_online_ = true;
wait_pong_till_ = td::Timestamp::in(10);
state_->peer_online_ = true;
notify_node();
}
void PeerActor::on_update_result(td::Result<td::BufferSlice> r_answer) {
update_query_id_ = {};
if (r_answer.is_ok()) {
peer_is_inited_ = true;
have_pieces_list_.clear();
if (!peer_is_inited_) {
peer_init_offset_ += UPDATE_INIT_BLOCK_SIZE;
if (peer_init_offset_ >= have_pieces_.as_slice().size()) {
peer_is_inited_ = true;
}
}
} else {
have_pieces_list_.insert(have_pieces_list_.end(), sent_have_pieces_list_.begin(), sent_have_pieces_list_.end());
}
sent_have_pieces_list_.clear();
}
void PeerActor::on_get_piece_result(PartId piece_id, td::Result<td::BufferSlice> r_answer) {
auto state = state_.lock();
auto it = state->node_queries_.find(piece_id);
if (it == state->node_queries_.end()) {
LOG(ERROR) << "???";
return;
}
//TODO: handle errors ???
it->second = [&]() -> td::Result<PeerState::Part> {
auto res = [&]() -> td::Result<PeerState::Part> {
TRY_RESULT(slice, std::move(r_answer));
TRY_RESULT(piece, ton::fetch_result<ton::ton_api::storage_getPiece>(slice.as_slice()));
PeerState::Part res;
@ -114,6 +115,7 @@ void PeerActor::on_get_piece_result(PartId piece_id, td::Result<td::BufferSlice>
res.proof = std::move(piece->proof_);
return std::move(res);
}();
state_->node_queries_results_.add_element(std::make_pair(piece_id, std::move(res)));
notify_node();
}
@ -123,10 +125,26 @@ void PeerActor::on_update_state_result(td::Result<td::BufferSlice> r_answer) {
}
}
void PeerActor::on_get_info_result(td::Result<td::BufferSlice> r_answer) {
get_info_query_id_ = {};
next_get_info_at_ = td::Timestamp::in(5.0);
alarm_timestamp().relax(next_get_info_at_);
if (r_answer.is_error()) {
return;
}
auto R = fetch_tl_object<ton::ton_api::storage_torrentInfo>(r_answer.move_as_ok(), true);
if (R.is_error()) {
return;
}
td::BufferSlice data = std::move(R.ok_ref()->data_);
if (!data.empty() && !state_->torrent_info_ready_) {
state_->torrent_info_response_callback_(std::move(data));
}
}
void PeerActor::on_query_result(td::uint64 query_id, td::Result<td::BufferSlice> r_answer) {
if (r_answer.is_ok()) {
on_pong();
state_.lock()->download.add(r_answer.ok().size(), td::Timestamp::now());
}
if (ping_query_id_ && ping_query_id_.value() == query_id) {
on_ping_result(std::move(r_answer));
@ -134,11 +152,14 @@ void PeerActor::on_query_result(td::uint64 query_id, td::Result<td::BufferSlice>
on_update_result(std::move(r_answer));
} else if (update_state_query_.query_id && update_state_query_.query_id.value() == query_id) {
on_update_state_result(std::move(r_answer));
} else if (get_info_query_id_ && get_info_query_id_.value() == query_id) {
on_get_info_result(std::move(r_answer));
} else {
for (auto &query_it : node_get_piece_) {
if (query_it.second.query_id && query_it.second.query_id.value() == query_id) {
on_get_piece_result(query_it.first, std::move(r_answer));
query_it.second.query_id = {};
node_get_piece_.erase(query_it.first);
break;
}
}
}
@ -151,8 +172,8 @@ void PeerActor::start_up() {
node_session_id_ = td::Random::secure_uint64();
auto state = state_.lock();
state->peer = actor_id(this);
state_->peer = actor_id(this);
state_->peer_ready_ = true;
notify_node();
schedule_loop();
@ -165,6 +186,7 @@ void PeerActor::loop() {
loop_update_init();
loop_update_state();
loop_update_pieces();
loop_get_torrent_info();
loop_node_get_piece();
loop_peer_get_piece();
@ -175,8 +197,8 @@ void PeerActor::loop() {
void PeerActor::loop_pong() {
if (wait_pong_till_ && wait_pong_till_.is_in_past()) {
wait_pong_till_ = {};
LOG(INFO) << "Disconnected";
state_.lock()->peer_online_ = false;
LOG(DEBUG) << "Disconnected from peer";
state_->peer_online_ = false;
notify_node();
}
alarm_timestamp().relax(wait_pong_till_);
@ -203,25 +225,24 @@ td::BufferSlice PeerActor::create_update_query(ton::tl_object_ptr<ton::ton_api::
}
void PeerActor::loop_update_init() {
if (!peer_session_id_) {
return;
}
if (update_query_id_) {
return;
}
if (peer_is_inited_) {
if (!peer_session_id_ || update_query_id_ || peer_is_inited_) {
return;
}
update_have_pieces();
have_pieces_list_.clear();
auto state = state_.lock();
auto node_state = state_->node_state_.load();
auto s = have_pieces_.as_slice();
if (s.size() <= peer_init_offset_) {
peer_is_inited_ = true;
return;
}
s = s.substr(peer_init_offset_, UPDATE_INIT_BLOCK_SIZE);
auto query = create_update_query(ton::create_tl_object<ton::ton_api::storage_updateInit>(
td::BufferSlice(have_pieces_.as_slice()), to_ton_api(state->node_state_)));
td::BufferSlice(s), (int)peer_init_offset_, to_ton_api(node_state)));
// take care of update_state_query's initial state
update_state_query_.state = state->node_state_;
update_state_query_.state = node_state;
update_state_query_.query_id = 0;
update_query_id_ = send_query(std::move(query));
@ -232,9 +253,9 @@ void PeerActor::loop_update_state() {
return;
}
auto state = state_.lock();
if (!(update_state_query_.state == state->node_state_)) {
update_state_query_.state = state->node_state_;
auto node_state = state_->node_state_.load();
if (!(update_state_query_.state == node_state)) {
update_state_query_.state = node_state;
update_state_query_.query_id = {};
}
@ -248,49 +269,45 @@ void PeerActor::loop_update_state() {
}
void PeerActor::update_have_pieces() {
auto state = state_.lock();
have_pieces_list_.insert(have_pieces_list_.end(), state->node_ready_parts_.begin(), state->node_ready_parts_.end());
for (auto piece_id : state->node_ready_parts_) {
auto node_ready_parts = state_->node_ready_parts_.read();
for (auto piece_id : node_ready_parts) {
if (piece_id < peer_init_offset_ + UPDATE_INIT_BLOCK_SIZE) {
have_pieces_list_.push_back(piece_id);
}
have_pieces_.set_one(piece_id);
}
state->node_ready_parts_.clear();
}
void PeerActor::loop_update_pieces() {
if (update_query_id_) {
return;
}
if (!peer_is_inited_) {
if (update_query_id_ || !peer_is_inited_) {
return;
}
update_have_pieces();
if (!have_pieces_list_.empty()) {
size_t count = std::min<size_t>(have_pieces_list_.size(), 1500);
sent_have_pieces_list_.assign(have_pieces_list_.end() - count, have_pieces_list_.end());
have_pieces_list_.erase(have_pieces_list_.end() - count, have_pieces_list_.end());
auto query = create_update_query(ton::create_tl_object<ton::ton_api::storage_updateHavePieces>(
td::transform(have_pieces_list_, [](auto x) { return static_cast<td::int32>(x); })));
td::transform(sent_have_pieces_list_, [](auto x) { return static_cast<td::int32>(x); })));
update_query_id_ = send_query(std::move(query));
}
}
void PeerActor::loop_node_get_piece() {
auto state = state_.lock();
for (auto it = node_get_piece_.begin(); it != node_get_piece_.end();) {
auto other_it = state->node_queries_.find(it->first);
if (other_it == state->node_queries_.end() || other_it->second) {
it = node_get_piece_.erase(it);
} else {
it++;
}
void PeerActor::loop_get_torrent_info() {
if (get_info_query_id_ || state_->torrent_info_ready_) {
return;
}
if (next_get_info_at_ && !next_get_info_at_.is_in_past()) {
return;
}
get_info_query_id_ = create_and_send_query<ton::ton_api::storage_getTorrentInfo>();
}
for (auto &query_it : state->node_queries_) {
if (query_it.second) {
continue;
}
node_get_piece_.emplace(query_it.first, NodePieceQuery{});
void PeerActor::loop_node_get_piece() {
for (auto part : state_->node_queries_.read()) {
node_get_piece_.emplace(part, NodePieceQuery{});
}
for (auto &query_it : node_get_piece_) {
@ -304,40 +321,29 @@ void PeerActor::loop_node_get_piece() {
}
void PeerActor::loop_peer_get_piece() {
auto state = state_.lock();
// process answers
for (auto &it : state->peer_queries_) {
if (!it.second) {
continue;
}
auto promise_it = peer_get_piece_.find(it.first);
for (auto &p : state_->peer_queries_results_.read()) {
state_->peer_queries_active_.erase(p.first);
auto promise_it = peer_get_piece_.find(p.first);
if (promise_it == peer_get_piece_.end()) {
continue;
}
promise_it->second.promise.set_result(it.second.unwrap().move_map([](PeerState::Part part) {
promise_it->second.promise.set_result(p.second.move_map([](PeerState::Part part) {
return ton::create_serialize_tl_object<ton::ton_api::storage_piece>(std::move(part.proof), std::move(part.data));
}));
peer_get_piece_.erase(promise_it);
}
// erase unneeded queries
for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) {
if (peer_get_piece_.count(it->first) == 0) {
it = state->peer_queries_.erase(it);
notify_node();
} else {
it++;
}
notify_node();
}
// create queries
std::vector<td::uint32> new_peer_queries;
for (auto &query_it : peer_get_piece_) {
auto res = state->peer_queries_.emplace(query_it.first, td::optional<td::Result<PeerState::Part>>());
if (res.second) {
if (state_->peer_queries_active_.insert(query_it.first).second) {
new_peer_queries.push_back(query_it.first);
notify_node();
}
}
state_->peer_queries_.add_elements(std::move(new_peer_queries));
}
void PeerActor::loop_notify_node() {
@ -345,13 +351,14 @@ void PeerActor::loop_notify_node() {
return;
}
need_notify_node_ = false;
state_.lock()->notify_node();
state_->notify_node();
}
void PeerActor::execute_ping(td::uint64 session_id, td::Promise<td::BufferSlice> promise) {
if (!peer_session_id_ || peer_session_id_.value() != session_id) {
peer_session_id_ = session_id;
peer_is_inited_ = false;
peer_init_offset_ = 0;
update_query_id_ = {};
update_state_query_.query_id = {};
@ -369,11 +376,11 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update,
promise.set_value(ton::create_serialize_tl_object<ton::ton_api::storage_ok>());
auto state = state_.lock();
std::vector<td::uint32> new_peer_ready_parts;
auto add_piece = [&](PartId id) {
if (!peer_have_pieces_.get(id)) {
peer_have_pieces_.set_one(id);
state->peer_ready_parts_.push_back(id);
new_peer_ready_parts.push_back(id);
notify_node();
}
};
@ -383,15 +390,15 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update,
if (peer_seqno_ >= seqno) {
return;
}
if (state->peer_state_ && state->peer_state_.value() == peer_state) {
if (state_->peer_state_ready_ && state_->peer_state_.load() == peer_state) {
return;
}
peer_seqno_ = seqno;
state->peer_state_ = peer_state;
state_->peer_state_.exchange(peer_state);
state_->peer_state_ready_ = true;
notify_node();
};
//LOG(ERROR) << "Got " << to_string(add_update);
downcast_call(*add_update.update_,
td::overloaded(
[&](ton::ton_api::storage_updateHavePieces &have_pieces) {
@ -404,16 +411,24 @@ void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update,
update_peer_state(from_ton_api(*init.state_));
td::Bitset new_bitset;
new_bitset.set_raw(init.have_pieces_.as_slice().str());
size_t offset = init.have_pieces_offset_ * 8;
for (auto size = new_bitset.size(), i = size_t(0); i < size; i++) {
if (new_bitset.get(i)) {
add_piece(static_cast<PartId>(i));
add_piece(static_cast<PartId>(offset + i));
}
}
}));
state_->peer_ready_parts_.add_elements(std::move(new_peer_ready_parts));
}
void PeerActor::execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise<td::BufferSlice> promise) {
PartId piece_id = get_piece.piece_id_;
peer_get_piece_[piece_id] = {std::move(promise)};
}
void PeerActor::execute_get_torrent_info(td::Promise<td::BufferSlice> promise) {
td::BufferSlice result = create_serialize_tl_object<ton_api::storage_torrentInfo>(
state_->torrent_info_ready_ ? state_->torrent_info_str_->clone() : td::BufferSlice());
promise.set_result(std::move(result));
}
} // namespace ton

View file

@ -21,7 +21,6 @@
#include "Bitset.h"
#include "PeerState.h"
#include "SharedState.h"
#include "td/utils/optional.h"
@ -38,14 +37,14 @@ class PeerActor : public td::actor::Actor {
virtual void send_query(td::uint64 query_id, td::BufferSlice query) = 0;
};
PeerActor(td::unique_ptr<Callback> callback, td::SharedState<PeerState> state);
PeerActor(td::unique_ptr<Callback> callback, std::shared_ptr<PeerState> state);
void execute_query(td::BufferSlice query, td::Promise<td::BufferSlice> promise);
void on_query_result(td::uint64 query_id, td::Result<td::BufferSlice> r_answer);
private:
td::unique_ptr<Callback> callback_;
td::SharedState<PeerState> state_;
std::shared_ptr<PeerState> state_;
bool need_notify_node_{false};
td::uint64 next_query_id_{0};
@ -53,7 +52,9 @@ class PeerActor : public td::actor::Actor {
// ping
td::Timestamp next_ping_at_;
td::optional<td::uint64> ping_query_id_;
td::optional<td::uint64> get_info_query_id_;
td::Timestamp wait_pong_till_;
td::Timestamp next_get_info_at_;
// startSession
td::uint64 node_session_id_;
@ -63,15 +64,17 @@ class PeerActor : public td::actor::Actor {
td::optional<td::uint64> peer_session_id_;
td::optional<td::uint64> update_query_id_;
bool peer_is_inited_{false};
size_t peer_init_offset_{0};
td::uint32 node_seqno_{0};
td::Bitset have_pieces_;
std::vector<PartId> have_pieces_list_;
std::vector<PartId> sent_have_pieces_list_;
td::uint32 peer_seqno_{0};
// update state
struct UpdateState {
td::optional<td::uint64> query_id;
PeerState::State state;
PeerState::State state{false, false};
};
UpdateState update_state_query_;
@ -102,6 +105,7 @@ class PeerActor : public td::actor::Actor {
void loop_update_init();
void loop_update_pieces();
void update_have_pieces();
void loop_get_torrent_info();
void loop_update_state();
@ -112,14 +116,14 @@ class PeerActor : public td::actor::Actor {
void loop_peer_get_piece();
void execute_add_update(ton::ton_api::storage_addUpdate &add_update, td::Promise<td::BufferSlice> promise);
void execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise<td::BufferSlice> promise);
void execute_get_torrent_info(td::Promise<td::BufferSlice> promise);
void on_update_result(td::Result<td::BufferSlice> r_answer);
void on_get_piece_result(PartId piece_id, td::Result<td::BufferSlice> r_answer);
void on_update_state_result(td::Result<td::BufferSlice> r_answer);
void on_get_info_result(td::Result<td::BufferSlice> r_answer);
template <class T, class... ArgsT>
td::uint64 create_and_send_query(ArgsT &&... args);
@ -127,5 +131,7 @@ class PeerActor : public td::actor::Actor {
void schedule_loop();
void notify_node();
static const size_t UPDATE_INIT_BLOCK_SIZE = 6000;
};
} // namespace ton

275
storage/PeerManager.h Normal file
View file

@ -0,0 +1,275 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "rldp2/rldp.h"
#include "td/actor/actor.h"
#include "overlay/overlay.h"
#include "NodeActor.h"
namespace ton_rldp = ton::rldp2;
class PeerManager : public td::actor::Actor {
public:
PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id, bool client_mode,
td::actor::ActorId<ton::overlay::Overlays> overlays, td::actor::ActorId<ton::adnl::Adnl> adnl,
td::actor::ActorId<ton_rldp::Rldp> rldp)
: overlay_id_(std::move(overlay_id))
, client_mode_(client_mode)
, overlays_(std::move(overlays))
, adnl_(std::move(adnl))
, rldp_(std::move(rldp)) {
CHECK(register_adnl_id(adnl_id) == 1);
}
void start_up() override {
}
void tear_down() override {
for (const auto& p : subscribed_peers_) {
if (p.second > 0) {
auto adnl_id = peer_to_andl(p.first);
if (adnl_id.is_ok()) {
send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id.move_as_ok(),
overlay_id_.compute_short_id());
}
}
}
}
void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise<td::BufferSlice> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
TRY_RESULT_PROMISE(promise, dst_id, peer_to_andl(dst));
send_closure(overlays_, &ton::overlay::Overlays::send_query_via, dst_id, src_id, overlay_id_.compute_short_id(), "",
std::move(promise), td::Timestamp::in(10), std::move(query), 1 << 25, rldp_);
}
void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) {
auto src_id = register_adnl_id(src);
auto dst_id = register_adnl_id(dst);
auto it = peers_.find(std::make_pair(dst_id, src_id));
if (it == peers_.end()) {
auto node_it = nodes_.find(dst_id);
if (node_it == nodes_.end()) {
LOG(ERROR) << "Unknown query destination";
promise.set_error(td::Status::Error("Unknown query destination"));
return;
}
if (!node_it->second.is_alive()) {
LOG(ERROR) << "Expired query destination";
promise.set_error(td::Status::Error("Unknown query destination"));
return;
}
send_closure(node_it->second, &ton::NodeActor::start_peer, src_id,
[promise = std::move(promise),
data = std::move(data)](td::Result<td::actor::ActorId<ton::PeerActor>> r_peer) mutable {
TRY_RESULT_PROMISE(promise, peer, std::move(r_peer));
send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise));
});
return;
}
send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise));
}
void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId<ton::PeerActor> peer) {
peers_[std::make_pair(src, dst)] = std::move(peer);
register_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void register_node(ton::PeerId src, td::actor::ActorId<ton::NodeActor> node) {
nodes_[src] = std::move(node);
register_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void unregister_node(ton::PeerId src, td::actor::ActorId<ton::NodeActor> node) {
auto it = nodes_.find(src);
CHECK(it != nodes_.end());
if (it->second == node) {
nodes_.erase(it);
}
unregister_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId<ton::PeerActor> peer) {
auto it = peers_.find(std::make_pair(src, dst));
CHECK(it != peers_.end());
if (it->second == peer) {
peers_.erase(it);
}
unregister_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void unregister_src(ton::PeerId src, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
if (--subscribed_peers_[src] == 0) {
subscribed_peers_.erase(src);
send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, src_id, overlay_id_.compute_short_id());
}
promise.set_value({});
}
void register_src(ton::PeerId src, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
if (subscribed_peers_[src]++ == 0) {
auto rules = ton::overlay::OverlayPrivacyRules{};
class Callback : public ton::overlay::Overlays::Callback {
public:
explicit Callback(td::actor::ActorId<PeerManager> peer_manager, ton::adnl::AdnlNodeIdShort dst)
: peer_manager_(std::move(peer_manager)), dst_(dst) {
}
void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id,
td::BufferSlice data) override {
}
void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id,
td::BufferSlice data, td::Promise<td::BufferSlice> promise) override {
td::actor::send_closure(peer_manager_, &PeerManager::execute_query, src, dst_, std::move(data),
std::move(promise));
}
void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id,
td::BufferSlice data) override {
}
private:
td::actor::ActorId<PeerManager> peer_manager_;
ton::adnl::AdnlNodeIdShort dst_;
};
send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay_ex, src_id, overlay_id_.clone(),
std::make_unique<Callback>(actor_id(this), src_id), rules, R"({ "type": "storage" })",
!client_mode_);
}
promise.set_value({});
}
td::Result<ton::adnl::AdnlNodeIdShort> peer_to_andl(ton::PeerId id) {
if (id <= 0 || id > adnl_ids_.size()) {
return td::Status::Error(PSLICE() << "Invalid peer id " << id);
}
return adnl_ids_[id - 1];
}
ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) {
auto it = adnl_to_peer_id_.emplace(id, next_peer_id_);
if (it.second) {
adnl_ids_.push_back(id);
next_peer_id_++;
}
return it.first->second;
}
void get_peers(ton::PeerId src, td::Promise<std::vector<ton::PeerId>> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, src_id, overlay_id_.compute_short_id(),
30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers));
}
void get_peer_info(ton::PeerId src, ton::PeerId peer, td::Promise<std::pair<td::Bits256, std::string>> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
TRY_RESULT_PROMISE(promise, peer_id, peer_to_andl(peer));
td::actor::send_closure(
adnl_, &ton::adnl::Adnl::get_conn_ip_str, src_id, peer_id,
promise.wrap([peer_id](std::string s) { return std::make_pair(peer_id.bits256_value(), std::move(s)); }));
}
static td::unique_ptr<ton::NodeActor::NodeCallback> create_callback(td::actor::ActorId<PeerManager> peer_manager) {
class Context : public ton::NodeActor::NodeCallback {
public:
Context(td::actor::ActorId<PeerManager> peer_manager) : peer_manager_(peer_manager) {
}
void get_peers(ton::PeerId src, td::Promise<std::vector<ton::PeerId>> promise) override {
send_closure(peer_manager_, &PeerManager::get_peers, src, std::move(promise));
}
void register_self(td::actor::ActorId<ton::NodeActor> self) override {
CHECK(self_.empty());
self_ = self;
send_closure(peer_manager_, &PeerManager::register_node, 1, self_);
}
~Context() override {
if (!self_.empty()) {
send_closure(peer_manager_, &PeerManager::unregister_node, 1, self_);
}
}
td::actor::ActorOwn<ton::PeerActor> create_peer(ton::PeerId self_id, ton::PeerId peer_id,
std::shared_ptr<ton::PeerState> state) override {
CHECK(self_id == 1);
class PeerCallback : public ton::PeerActor::Callback {
public:
PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId<PeerManager> peer_manager)
: self_id_(self_id), peer_id_(peer_id), peer_manager_(std::move(peer_manager)) {
}
void register_self(td::actor::ActorId<ton::PeerActor> self) override {
CHECK(self_.empty());
self_ = std::move(self);
send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_);
}
void send_query(td::uint64 query_id, td::BufferSlice query) override {
send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query),
promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id));
}
~PeerCallback() {
if (!self_.empty()) {
send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_);
}
}
private:
td::actor::ActorId<ton::PeerActor> self_;
ton::PeerId self_id_;
ton::PeerId peer_id_;
td::actor::ActorId<PeerManager> peer_manager_;
};
return td::actor::create_actor<ton::PeerActor>(PSLICE() << "PeerActor " << peer_id,
td::make_unique<PeerCallback>(self_id, peer_id, peer_manager_),
std::move(state));
}
void get_peer_info(ton::PeerId src, ton::PeerId peer,
td::Promise<std::pair<td::Bits256, std::string>> promise) override {
td::actor::send_closure(peer_manager_, &PeerManager::get_peer_info, src, peer, std::move(promise));
}
private:
td::actor::ActorId<PeerManager> peer_manager_;
std::vector<ton::PeerId> peers_;
td::actor::ActorId<ton::NodeActor> self_;
};
return td::make_unique<Context>(std::move(peer_manager));
}
private:
ton::overlay::OverlayIdFull overlay_id_;
bool client_mode_ = false;
td::actor::ActorId<ton::overlay::Overlays> overlays_;
td::actor::ActorId<ton::adnl::Adnl> adnl_;
td::actor::ActorId<ton_rldp::Rldp> rldp_;
std::map<std::pair<ton::PeerId, ton::PeerId>, td::actor::ActorId<ton::PeerActor>> peers_;
std::map<ton::PeerId, td::actor::ActorId<ton::NodeActor>> nodes_;
ton::PeerId next_peer_id_{1};
std::map<ton::adnl::AdnlNodeIdShort, ton::PeerId> adnl_to_peer_id_;
std::vector<ton::adnl::AdnlNodeIdShort> adnl_ids_;
std::map<ton::PeerId, td::uint32> subscribed_peers_;
void got_overlay_random_peers(td::Result<std::vector<ton::adnl::AdnlNodeIdShort>> r_peers,
td::Promise<std::vector<ton::PeerId>> promise) {
TRY_RESULT_PROMISE(promise, peers, std::move(r_peers));
std::vector<ton::PeerId> res;
for (auto peer : peers) {
res.push_back(register_adnl_id(peer));
}
promise.set_value(std::move(res));
}
};
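A rough sketch (not from the diff) of how a storage node is expected to plug into the overlay through PeerManager; the constructor and create_callback signatures are taken from the file above, while the wrapper function name and wiring are assumptions. In client mode PeerManager later joins the overlay with announce_self = false (see register_src above).
#include "storage/PeerManager.h"

td::actor::ActorOwn<PeerManager> start_peer_manager(
    ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id, bool client_mode,
    td::actor::ActorId<ton::overlay::Overlays> overlays, td::actor::ActorId<ton::adnl::Adnl> adnl,
    td::actor::ActorId<ton_rldp::Rldp> rldp) {
  auto peer_manager = td::actor::create_actor<PeerManager>("PeerManager", adnl_id, std::move(overlay_id),
                                                           client_mode, overlays, adnl, rldp);
  // create_callback() produces the NodeActor::NodeCallback that registers the local node as
  // peer id 1 and spawns a PeerActor per remote peer; it would be handed to the NodeActor
  // that downloads or seeds the torrent (not shown here).
  auto callback = PeerManager::create_callback(peer_manager.get());
  (void)callback;
  return peer_manager;
}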

View file

@ -27,7 +27,7 @@ void PeerState::notify_node() {
}
void PeerState::notify_peer() {
if (peer.empty()) {
if (!peer_ready_) {
return;
}
td::actor::send_signals_later(peer, td::actor::ActorSignals::wakeup());

View file

@ -25,17 +25,69 @@
#include "td/actor/actor.h"
#include <map>
#include "LoadSpeed.h"
#include <atomic>
namespace ton {
using PeerId = td::uint64;
using PartId = td::uint32;
// Concurrent buffer for messages with one writer and one reader
// Reader reads all existing messages at once
// TODO: Use some better algorithm here, or maybe a concurrent queue
template <typename T>
class MessageBuffer {
public:
MessageBuffer() = default;
MessageBuffer(const MessageBuffer<T>&) = delete;
MessageBuffer& operator=(const MessageBuffer<T>&) = delete;
~MessageBuffer() {
delete ptr_.load();
}
void add_element(T x) {
std::vector<T>* vec = ptr_.exchange(nullptr);
if (vec == nullptr) {
vec = new std::vector<T>();
}
vec->push_back(std::move(x));
CHECK(ptr_.exchange(vec) == nullptr);
}
void add_elements(std::vector<T> elements) {
if (elements.empty()) {
return;
}
std::vector<T>* vec = ptr_.exchange(nullptr);
if (vec == nullptr) {
vec = new std::vector<T>(std::move(elements));
} else {
for (auto& x : elements) {
vec->push_back(std::move(x));
}
}
CHECK(ptr_.exchange(vec) == nullptr);
}
std::vector<T> read() {
std::vector<T>* vec = ptr_.exchange(nullptr);
std::vector<T> result;
if (vec != nullptr) {
result = std::move(*vec);
delete vec;
}
return result;
}
private:
std::atomic<std::vector<T>*> ptr_{nullptr};
};
struct PeerState {
explicit PeerState(td::actor::ActorId<> node) : node(std::move(node)) {
}
struct State {
bool will_upload{false};
bool want_download{false};
bool will_upload;
bool want_download;
auto key() const {
return std::tie(will_upload, want_download);
}
@ -43,31 +95,39 @@ struct PeerState {
return key() == other.key();
}
};
State node_state_;
td::optional<State> peer_state_;
bool peer_online_{false};
// Thread-safe fields
std::atomic<State> node_state_{State{false, false}};
std::atomic_bool peer_state_ready_{false};
std::atomic<State> peer_state_{State{false, false}};
std::atomic_bool peer_online_{false};
struct Part {
td::BufferSlice proof;
td::BufferSlice data;
};
std::map<PartId, td::optional<td::Result<Part>>> node_queries_;
std::map<PartId, td::optional<td::Result<Part>>> peer_queries_;
std::set<PartId> node_queries_active_; // Node only
MessageBuffer<PartId> node_queries_; // Node -> Peer
MessageBuffer<std::pair<PartId, td::Result<Part>>> node_queries_results_; // Peer -> Node
std::set<PartId> peer_queries_active_; // Peer only
MessageBuffer<PartId> peer_queries_; // Peer -> Node
MessageBuffer<std::pair<PartId, td::Result<Part>>> peer_queries_results_; // Node -> Peer
// Peer -> Node
// updates are added to this vector, so the reader will be able to process all changes
std::vector<PartId> peer_ready_parts_;
MessageBuffer<PartId> peer_ready_parts_;
// Node -> Peer
MessageBuffer<PartId> node_ready_parts_;
// Node -> Peer
// writer writes all new parts to this vector. This state will eventually be synchronized with the peer
std::vector<PartId> node_ready_parts_;
std::atomic_bool torrent_info_ready_{false};
std::shared_ptr<td::BufferSlice> torrent_info_str_;
std::function<void(td::BufferSlice)> torrent_info_response_callback_;
td::actor::ActorId<> node;
const td::actor::ActorId<> node;
std::atomic_bool peer_ready_{false};
td::actor::ActorId<> peer;
LoadSpeed upload;
LoadSpeed download;
void notify_node();
void notify_peer();
};
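A minimal usage sketch (not part of the diff) of the one-writer/one-reader exchange pattern that MessageBuffer implements for the fields above; the include path is an assumption.
#include "storage/PeerState.h"  // assumed include path

// Producer side (e.g. the node): appends ready part ids one by one or in batches.
void producer(ton::MessageBuffer<ton::PartId> &buf) {
  buf.add_element(1);
  buf.add_elements({2, 3, 4});
}

// Consumer side (e.g. the peer actor): drains everything accumulated so far in one call.
void consumer(ton::MessageBuffer<ton::PartId> &buf) {
  for (ton::PartId part : buf.read()) {
    (void)part;  // process the part id
  }
}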

View file

@ -1,67 +0,0 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
Copyright 2017-2020 Telegram Systems LLP
*/
#pragma once
#include <atomic>
#include "td/utils/MovableValue.h"
#include <memory>
namespace td {
template <class T>
class SharedState {
public:
friend class Guard;
class Guard {
public:
Guard(Guard &&) = default;
Guard(SharedState<T> *self) : self(self) {
CHECK(!self->data_->is_locked.exchange(true));
}
~Guard() {
if (self.get()) {
CHECK(self.get()->data_->is_locked.exchange(false));
}
}
T *get() {
return &self.get()->data_->data;
}
T *operator->() {
return get();
}
private:
td::MovableValue<SharedState<T> *> self;
};
auto lock() {
return Guard{this};
}
auto unsafe() {
return &data_->data;
}
private:
struct Data {
std::atomic<bool> is_locked{};
T data;
};
std::shared_ptr<Data> data_{std::make_shared<Data>()};
};
} // namespace td

View file

@ -23,29 +23,30 @@
#include "td/utils/crypto.h"
#include "td/utils/port/Stat.h"
#include "td/utils/tl_helpers.h"
#include "td/utils/port/path.h"
namespace ton {
Torrent::Torrent(TorrentMeta meta)
: info_(meta.info)
, merkle_tree_(info_.pieces_count(), info_.root_hash)
, piece_is_ready_(info_.pieces_count(), false) {
not_ready_piece_count_ = piece_is_ready_.size();
header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size;
not_ready_pending_piece_count_ = header_pieces_count_;
if (meta.header) {
set_header(meta.header.unwrap());
} else {
header_str_ = td::BufferSlice(info_.header_size);
}
if (meta.root_proof.not_null()) {
merkle_tree_.add_proof(meta.root_proof);
td::Result<Torrent> Torrent::open(Options options, td::Bits256 hash) {
Torrent res(hash);
if (!options.in_memory) {
if (options.root_dir.empty()) {
options.root_dir = ".";
}
res.set_root_dir(options.root_dir);
}
return std::move(res);
}
td::Result<Torrent> Torrent::open(Options options, TorrentMeta meta) {
Torrent res(std::move(meta));
Torrent res(td::Bits256(meta.info.get_hash()));
TRY_STATUS(res.init_info(std::move(meta.info)));
if (meta.header) {
TRY_STATUS(res.set_header(meta.header.unwrap()));
}
if (meta.root_proof.not_null()) {
TRY_STATUS(res.merkle_tree_.add_proof(meta.root_proof));
}
if (!options.in_memory) {
if (options.root_dir.empty()) {
options.root_dir = ".";
@ -64,6 +65,7 @@ td::Result<Torrent> Torrent::open(Options options, td::Slice meta_str) {
}
const Torrent::Info &Torrent::get_info() const {
CHECK(inited_info_);
return info_;
}
@ -103,6 +105,9 @@ td::Status Torrent::iterate_piece(Info::PieceInfo piece, F &&f) {
}
bool Torrent::is_piece_ready(td::uint64 piece_i) const {
if (!inited_info_) {
return false;
}
CHECK(piece_i < info_.pieces_count());
return piece_is_ready_[piece_i];
}
@ -133,7 +138,8 @@ Torrent::PartsRange Torrent::get_file_parts_range(size_t i) {
return res;
}
Torrent::PartsRange Torrent::get_header_parts_range() {
Torrent::PartsRange Torrent::get_header_parts_range() const {
CHECK(inited_info_);
PartsRange res;
res.begin = 0;
res.end = header_pieces_count_;
@ -184,11 +190,14 @@ std::string Torrent::get_stats_str() const {
}
void Torrent::validate() {
CHECK(header_);
if (!inited_info_ || !header_) {
return;
}
std::fill(piece_is_ready_.begin(), piece_is_ready_.end(), false);
not_ready_piece_count_ = info_.pieces_count();
included_ready_size_ = 0;
for (auto &chunk : chunks_) {
chunk.ready_size = 0;
if (root_dir_) {
@ -200,31 +209,25 @@ void Torrent::validate() {
}
std::vector<td::UInt256> hashes;
std::vector<MerkleTree::Chunk> chunks;
std::vector<std::pair<size_t, td::Bits256>> pieces;
auto flush = [&] {
td::Bitset bitmask;
merkle_tree_.add_chunks(chunks, bitmask);
for (size_t i = 0; i < chunks.size(); i++) {
if (!bitmask.get(i)) {
continue;
}
auto piece_i = chunks[i].index;
for (size_t piece_i : merkle_tree_.add_pieces(std::move(pieces))) {
auto piece = info_.get_piece_info(piece_i);
iterate_piece(piece, [&](auto it, auto info) {
it->ready_size += info.size;
if (!it->excluded) {
included_ready_size_ += info.size;
}
return td::Status::OK();
});
piece_is_ready_[piece_i] = true;
ready_parts_count_++;
CHECK(not_ready_piece_count_);
not_ready_piece_count_--;
}
hashes.clear();
chunks.clear();
pieces.clear();
};
td::BufferSlice buf(info_.piece_size);
@ -246,24 +249,23 @@ void Torrent::validate() {
auto dest = buf.as_slice().truncate(info.size);
TRY_STATUS(it->get_piece(dest, info.chunk_offset, &cache));
sha256.feed(dest);
//LOG(ERROR) << dest;
return td::Status::OK();
});
if (is_ok.is_error()) {
LOG_IF(ERROR, !skipped) << "Failed: " << is_ok;
LOG(ERROR) << "Failed: " << is_ok;
continue;
}
MerkleTree::Chunk chunk;
chunk.index = piece_i;
sha256.extract(chunk.hash.as_slice());
chunks.push_back(chunk);
td::Bits256 hash;
sha256.extract(hash.as_slice());
pieces.emplace_back(piece_i, hash);
}
flush();
}
td::Result<std::string> Torrent::get_piece_data(td::uint64 piece_i) {
if (!inited_info_) {
return td::Status::Error("Torrent info not inited");
}
CHECK(piece_i < info_.pieces_count());
if (!piece_is_ready_[piece_i]) {
return td::Status::Error("Piece is not ready");
@ -272,6 +274,10 @@ td::Result<std::string> Torrent::get_piece_data(td::uint64 piece_i) {
if (it != pending_pieces_.end()) {
return it->second;
}
auto it2 = in_memory_pieces_.find(piece_i);
if (it2 != in_memory_pieces_.end()) {
return it2->second.data;
}
auto piece = info_.get_piece_info(piece_i);
std::string res(piece.size, '\0');
@ -282,68 +288,114 @@ td::Result<std::string> Torrent::get_piece_data(td::uint64 piece_i) {
}
td::Result<td::Ref<vm::Cell>> Torrent::get_piece_proof(td::uint64 piece_i) {
if (!inited_info_) {
return td::Status::Error("Torrent info not inited");
}
CHECK(piece_i < info_.pieces_count());
return merkle_tree_.gen_proof(piece_i, piece_i);
}
td::Status Torrent::add_piece(td::uint64 piece_i, td::Slice data, td::Ref<vm::Cell> proof) {
TRY_STATUS(merkle_tree_.add_proof(proof));
//LOG(ERROR) << "Add piece #" << piece_i;
if (fatal_error_.is_error()) {
return fatal_error_.clone().move_as_error_prefix("Fatal error: ");
}
if (!inited_info_) {
return td::Status::Error("Torrent info not inited");
}
if (!proof.is_null()) {
TRY_STATUS(merkle_tree_.add_proof(proof));
}
CHECK(piece_i < info_.pieces_count());
if (piece_is_ready_[piece_i]) {
return td::Status::OK();
}
td::Bits256 hash;
td::sha256(data, hash.as_slice());
TRY_RESULT(expected_hash, merkle_tree_.get_piece_hash(piece_i));
if (expected_hash != hash) {
return td::Status::Error("Hash mismatch");
}
piece_is_ready_[piece_i] = true;
ready_parts_count_++;
ton::MerkleTree::Chunk chunk;
chunk.index = piece_i;
td::sha256(data, chunk.hash.as_slice());
TRY_STATUS(merkle_tree_.try_add_chunks({chunk}));
if (chunks_.empty()) {
return add_header_piece(piece_i, data);
if (chunks_.empty() || !enabled_wirte_to_files_) {
return add_pending_piece(piece_i, data);
}
return add_validated_piece(piece_i, data);
}
td::Status Torrent::add_header_piece(td::uint64 piece_i, td::Slice data) {
td::Status Torrent::add_proof(td::Ref<vm::Cell> proof) {
if (!inited_info_) {
return td::Status::Error("Torrent info not inited");
}
return merkle_tree_.add_proof(std::move(proof));
}
td::Status Torrent::add_pending_piece(td::uint64 piece_i, td::Slice data) {
pending_pieces_[piece_i] = data.str();
if (piece_i < header_pieces_count_) {
//LOG(ERROR) << "Add header piece #" << piece_i;
auto piece = info_.get_piece_info(piece_i);
auto dest = header_str_.as_slice().substr(piece.offset);
data.truncate(dest.size());
dest.copy_from(data);
not_ready_pending_piece_count_--;
if (not_ready_pending_piece_count_ == 0) {
//LOG(ERROR) << "Got full header";
TorrentHeader header;
TRY_STATUS(td::unserialize(header, header_str_.as_slice())); // TODO: it is a fatal error
set_header(header);
for (auto &it : pending_pieces_) {
TRY_STATUS(add_validated_piece(it.first, it.second));
auto S = td::unserialize(header, header_str_.as_slice());
if (S.is_ok()) {
S = set_header(std::move(header));
}
if (S.is_error()) {
S = S.move_as_error_prefix("Invalid torrent header: ");
fatal_error_ = S.clone();
return S;
}
if (enabled_wirte_to_files_) {
add_pending_pieces();
}
pending_pieces_.clear();
}
} else {
LOG(ERROR) << "PENDING";
}
return td::Status::OK();
}
std::string Torrent::get_chunk_path(td::Slice name) {
void Torrent::enable_write_to_files() {
if (enabled_wirte_to_files_) {
return;
}
enabled_wirte_to_files_ = true;
if (header_) {
add_pending_pieces();
}
}
void Torrent::add_pending_pieces() {
for (auto &p : pending_pieces_) {
td::Status S = add_validated_piece(p.first, std::move(p.second));
if (S.is_error()) {
LOG(WARNING) << "Failed to add pending piece #" << p.first << ": " << S;
}
}
pending_pieces_.clear();
}
std::string Torrent::get_chunk_path(td::Slice name) const {
return PSTRING() << root_dir_.value() << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH << name;
}
std::string Torrent::get_file_path(size_t i) const {
return get_chunk_path(chunks_.at(i + 1).name);
}
td::Status Torrent::init_chunk_data(ChunkState &chunk) {
if (chunk.data) {
return td::Status::OK();
}
if (root_dir_) {
TRY_RESULT(data, td::FileNoCacheBlobView::create(get_chunk_path(chunk.name), chunk.size, true));
std::string path = get_chunk_path(chunk.name);
TRY_STATUS(td::mkpath(path));
TRY_RESULT(data, td::FileNoCacheBlobView::create(path, chunk.size, true));
chunk.data = std::move(data);
} else {
chunk.data = td::BufferSliceBlobView::create(td::BufferSlice(chunk.size));
@ -355,22 +407,41 @@ td::Status Torrent::add_validated_piece(td::uint64 piece_i, td::Slice data) {
CHECK(!chunks_.empty());
auto piece = info_.get_piece_info(piece_i);
std::set<size_t> excluded;
TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) {
if (it->excluded) {
excluded.insert(it - chunks_.begin());
return td::Status::OK();
}
TRY_STATUS(init_chunk_data(*it));
return it->add_piece(data.substr(info.piece_offset, info.size), info.chunk_offset);
TRY_STATUS(it->write_piece(data.substr(info.piece_offset, info.size), info.chunk_offset));
return td::Status::OK();
}));
TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) {
if (!it->excluded) {
it->ready_size += info.size;
included_ready_size_ += info.size;
}
return td::Status::OK();
}));
piece_is_ready_[piece_i] = true;
not_ready_piece_count_--;
if (!excluded.empty()) {
in_memory_pieces_[piece_i] = {data.str(), std::move(excluded)};
}
return td::Status::OK();
}
bool Torrent::is_completed() const {
return not_ready_piece_count_ == 0;
return inited_info_ && enabled_wirte_to_files_ && included_ready_size_ == included_size_;
}
td::Result<td::BufferSlice> Torrent::read_file(td::Slice name) {
if (!inited_info_) {
return td::Status::Error("Torrent info not inited");
}
for (auto &chunk : chunks_) {
if (chunk.name == name) {
td::BufferSlice res(chunk.size);
@ -383,10 +454,12 @@ td::Result<td::BufferSlice> Torrent::read_file(td::Slice name) {
Torrent::GetMetaOptions::GetMetaOptions() = default;
std::string Torrent::get_meta_str(const GetMetaOptions &options) const {
CHECK(inited_info_);
return get_meta(options).serialize();
}
TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const {
CHECK(inited_info_);
TorrentMeta torrent_file;
if (options.with_header) {
torrent_file.header = header_;
@ -399,17 +472,36 @@ TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const {
return torrent_file;
}
Torrent::Torrent(Info info, td::optional<TorrentHeader> header, ton::MerkleTree tree, std::vector<ChunkState> chunks)
: info_(info)
Torrent::Torrent(td::Bits256 hash) : hash_(hash), inited_info_(false) {
}
Torrent::Torrent(Info info, td::optional<TorrentHeader> header, ton::MerkleTree tree, std::vector<ChunkState> chunks,
std::string root_dir)
: hash_(info.get_hash())
, inited_info_(true)
, info_(info)
, root_dir_(std::move(root_dir))
, header_(std::move(header))
, enabled_wirte_to_files_(true)
, merkle_tree_(std::move(tree))
, piece_is_ready_(info_.pieces_count(), true)
, ready_parts_count_{info_.pieces_count()}
, chunks_(std::move(chunks)) {
, chunks_(std::move(chunks))
, included_size_(info_.file_size)
, included_ready_size_(info_.file_size) {
}
void Torrent::set_header(const TorrentHeader &header) {
header_ = header;
td::Status Torrent::set_header(TorrentHeader header) {
if (header_) {
return td::Status::OK();
}
auto header_str = header.serialize();
td::Bits256 header_hash;
td::sha256(header_str.as_slice(), header_hash.as_slice());
if (header_hash != info_.header_hash) {
return td::Status::Error("Incorrect header hash");
}
TRY_STATUS_PREFIX(header.validate(info_.file_size, info_.header_size), "Invalid torrent header: ");
auto add_chunk = [&](td::Slice name, td::uint64 offset, td::uint64 size) {
ChunkState chunk;
chunk.name = name.str();
@ -417,14 +509,17 @@ void Torrent::set_header(const TorrentHeader &header) {
chunk.size = size;
chunk.offset = offset;
chunks_.push_back(std::move(chunk));
included_size_ += size;
};
add_chunk("", 0, header.serialization_size());
chunks_.back().data = td::BufferSliceBlobView::create(header.serialize());
add_chunk("", 0, header_str.size());
chunks_.back().data = td::BufferSliceBlobView::create(std::move(header_str));
for (size_t i = 0; i < header.files_count; i++) {
auto l = header.get_data_begin(i);
auto r = header.get_data_end(i);
add_chunk(header.get_name(i), l, r - l);
}
header_ = std::move(header);
return td::Status::OK();
}
size_t Torrent::get_ready_parts_count() const {
@ -432,6 +527,7 @@ size_t Torrent::get_ready_parts_count() const {
}
std::vector<size_t> Torrent::chunks_by_piece(td::uint64 piece_id) {
CHECK(inited_info_);
std::vector<size_t> res;
auto piece = info_.get_piece_info(piece_id);
auto is_ok = iterate_piece(piece, [&](auto it, auto info) {
@ -441,4 +537,165 @@ std::vector<size_t> Torrent::chunks_by_piece(td::uint64 piece_id) {
return res;
}
td::Status Torrent::init_info(Info info) {
if (hash_ != info.get_hash()) {
return td::Status::Error("Hash mismatch");
}
if (inited_info_) {
return td::Status::OK();
}
auto S = info.validate();
if (S.is_error()) {
S = S.move_as_error_prefix("Invalid torrent info: ");
fatal_error_ = S.clone();
return S;
}
inited_info_ = true;
info_ = std::move(info);
merkle_tree_ = MerkleTree(info_.pieces_count(), info_.root_hash);
piece_is_ready_.resize(info_.pieces_count(), false);
not_ready_piece_count_ = piece_is_ready_.size();
header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size;
not_ready_pending_piece_count_ = header_pieces_count_;
header_str_ = td::BufferSlice(info_.header_size);
return td::Status::OK();
}
void Torrent::set_file_excluded(size_t i, bool excluded) {
CHECK(header_);
CHECK(i + 1 < chunks_.size());
if (!root_dir_) {
return; // All files are in-memory, nothing to do
}
size_t chunk_i = i + 1;
auto &chunk = chunks_[chunk_i];
if (chunk.excluded == excluded) {
return;
}
if (excluded) {
included_size_ -= chunk.size;
included_ready_size_ -= chunk.ready_size;
} else {
included_size_ += chunk.size;
included_ready_size_ += chunk.ready_size;
}
chunk.excluded = excluded;
if (!enabled_wirte_to_files_ || excluded) {
return;
}
auto range = get_file_parts_range(i);
for (auto it = in_memory_pieces_.lower_bound(range.begin); it != in_memory_pieces_.end() && it->first < range.end;) {
if (!it->second.pending_chunks.count(chunk_i)) {
++it;
continue;
}
auto piece_i = it->first;
auto piece = info_.get_piece_info(piece_i);
auto S = [&]() {
auto l = td::max(chunk.offset, piece.offset);
auto r = td::min(chunk.offset + chunk.size, piece.offset + piece.size);
TRY_STATUS(init_chunk_data(chunk));
TRY_STATUS(chunk.write_piece(it->second.data.substr(l - piece.offset, r - l), l - chunk.offset));
chunk.ready_size += r - l;
included_ready_size_ += r - l;
return td::Status::OK();
}();
if (S.is_error()) {
// Erase piece completely
piece_is_ready_[piece_i] = false;
not_ready_piece_count_++;
iterate_piece(piece, [&](auto it2, auto info) {
if (!it2->excluded) {
included_ready_size_ -= info.size;
}
if (!it2->excluded || !it->second.pending_chunks.count(it2 - chunks_.begin())) {
it2->ready_size -= info.size;
}
return td::Status::OK();
});
it = in_memory_pieces_.erase(it);
continue;
}
it->second.pending_chunks.erase(chunk_i);
if (it->second.pending_chunks.empty()) {
it = in_memory_pieces_.erase(it);
} else {
++it;
}
}
}
void Torrent::load_from_files(std::string files_path) {
CHECK(inited_header());
std::vector<td::optional<td::BlobView>> new_blobs;
new_blobs.push_back(td::BufferSliceBlobView::create(get_header().serialize()));
size_t files_count = get_files_count().unwrap();
for (size_t i = 0; i < files_count; ++i) {
std::string new_path = PSTRING() << files_path << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH
<< get_file_name(i);
auto R = td::FileNoCacheBlobView::create(new_path, get_file_size(i), false);
if (R.is_error() && files_count == 1) {
R = td::FileNoCacheBlobView::create(files_path, get_file_size(i), false);
}
if (R.is_error()) {
new_blobs.emplace_back();
} else {
new_blobs.push_back(R.move_as_ok());
}
}
auto load_new_piece = [&](size_t piece_i) -> td::Result<std::string> {
auto piece = info_.get_piece_info(piece_i);
bool included = false;
TRY_STATUS(iterate_piece(piece, [&](auto it, IterateInfo info) {
if (!it->excluded) {
included = true;
}
return td::Status::OK();
}));
if (!included) {
return td::Status::Error("Piece is excluded");
}
std::string data(piece.size, '\0');
TRY_STATUS(iterate_piece(piece, [&](auto it, IterateInfo info) {
size_t chunk_i = it - chunks_.begin();
if (!new_blobs[chunk_i]) {
return td::Status::Error("No such file");
}
TRY_RESULT(s, new_blobs[chunk_i].value().view_copy(td::MutableSlice(data).substr(info.piece_offset, info.size),
info.chunk_offset));
if (s != info.size) {
return td::Status::Error("Can't read file");
}
return td::Status::OK();
}));
return data;
};
std::vector<std::pair<size_t, td::Bits256>> new_pieces;
for (size_t i = 0; i < piece_is_ready_.size(); ++i) {
if (piece_is_ready_[i]) {
continue;
}
auto r_data = load_new_piece(i);
if (r_data.is_error()) {
continue;
}
td::Bits256 hash;
td::sha256(r_data.ok(), hash.as_slice());
new_pieces.emplace_back(i, hash);
}
size_t added_cnt = 0;
for (size_t i : merkle_tree_.add_pieces(std::move(new_pieces))) {
auto r_data = load_new_piece(i);
if (r_data.is_error()) {
continue;
}
if (add_piece(i, r_data.ok(), {}).is_ok()) {
++added_cnt;
}
}
if (added_cnt > 0) {
LOG(INFO) << "Loaded " << added_cnt << " new pieces for " << get_hash().to_hex();
}
}
} // namespace ton
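A small sketch (not from the diff) of the batched piece verification used above in Torrent::validate() and Torrent::load_from_files(): candidate piece hashes are handed to MerkleTree::add_pieces(), which yields only the indices that match the tree. The include path of MerkleTree.h and the helper name are assumptions; td::sha256 is the same utility used above.
#include "storage/MerkleTree.h"  // assumed include path
#include "td/utils/crypto.h"

#include <string>
#include <utility>
#include <vector>

// Returns how many of the given (piece index, piece data) pairs were accepted by the tree.
size_t verify_pieces(ton::MerkleTree &tree, const std::vector<std::pair<size_t, std::string>> &pieces) {
  std::vector<std::pair<size_t, td::Bits256>> hashes;
  for (const auto &p : pieces) {
    td::Bits256 hash;
    td::sha256(p.second, hash.as_slice());
    hashes.emplace_back(p.first, hash);
  }
  size_t accepted = 0;
  for (size_t piece_i : tree.add_pieces(std::move(hashes))) {
    (void)piece_i;  // a verified piece; the caller would mark it as ready
    ++accepted;
  }
  return accepted;
}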

View file

@ -25,6 +25,7 @@
#include "td/db/utils/BlobView.h"
#include <map>
#include <set>
namespace ton {
class Torrent {
@ -40,6 +41,7 @@ class Torrent {
};
// creation
static td::Result<Torrent> open(Options options, td::Bits256 hash);
static td::Result<Torrent> open(Options options, TorrentMeta meta);
static td::Result<Torrent> open(Options options, td::Slice meta_str);
void validate();
@ -54,7 +56,8 @@ class Torrent {
// add piece (with an optional proof)
td::Status add_piece(td::uint64 piece_i, td::Slice data, td::Ref<vm::Cell> proof);
//TODO: add multiple chunks? Merkle tree supports much more general interface
//TODO: add multiple pieces? Merkle tree supports much more general interface
td::Status add_proof(td::Ref<vm::Cell> proof);
bool is_completed() const;
@ -91,19 +94,74 @@ class Torrent {
td::CSlice get_file_name(size_t i) const;
td::uint64 get_file_size(size_t i) const;
td::uint64 get_file_ready_size(size_t i) const;
std::string get_file_path(size_t i) const;
struct PartsRange {
td::uint64 begin{0};
td::uint64 end{0};
bool contains(td::uint64 i) const {
return begin <= i && i < end;
}
};
PartsRange get_file_parts_range(size_t i);
PartsRange get_header_parts_range();
PartsRange get_header_parts_range() const;
size_t get_ready_parts_count() const;
std::vector<size_t> chunks_by_piece(td::uint64 piece_id);
bool inited_info() const {
return inited_info_;
}
bool inited_header() const {
return (bool)header_;
}
td::Bits256 get_hash() const {
return hash_;
}
std::string get_root_dir() const {
return root_dir_ ? root_dir_.value() : "";
}
td::Status init_info(Info info);
td::Status set_header(TorrentHeader header);
void enable_write_to_files();
void set_file_excluded(size_t i, bool excluded);
bool file_is_excluded(size_t i) const {
return chunks_.at(i).excluded;
}
td::uint64 get_included_size() const {
return header_ ? included_size_ : info_.file_size;
}
td::uint64 get_included_ready_size() const {
return included_ready_size_;
}
bool is_piece_in_memory(td::uint64 i) const {
return in_memory_pieces_.count(i);
}
std::set<td::uint64> get_pieces_in_memory() const {
std::set<td::uint64> pieces;
for (const auto &p : in_memory_pieces_) {
pieces.insert(p.first);
}
return pieces;
}
const td::Status &get_fatal_error() const {
return fatal_error_;
}
const TorrentHeader &get_header() const {
CHECK(inited_header())
return header_.value();
}
void load_from_files(std::string files_path);
private:
td::Bits256 hash_;
bool inited_info_ = false;
Info info_;
td::optional<std::string> root_dir_;
@ -113,6 +171,12 @@ class Torrent {
size_t not_ready_pending_piece_count_{0};
size_t header_pieces_count_{0};
std::map<td::uint64, td::string> pending_pieces_;
bool enabled_wirte_to_files_ = false;
struct InMemoryPiece {
std::string data;
std::set<size_t> pending_chunks;
};
std::map<td::uint64, InMemoryPiece> in_memory_pieces_; // Pieces that overlap excluded files
ton::MerkleTree merkle_tree_;
@ -120,12 +184,15 @@ class Torrent {
size_t not_ready_piece_count_{0};
size_t ready_parts_count_{0};
td::Status fatal_error_ = td::Status::OK();
struct ChunkState {
std::string name;
td::uint64 offset{0};
td::uint64 size{0};
td::uint64 ready_size{0};
td::BlobView data;
bool excluded{false};
struct Cache {
td::uint64 offset{0};
@ -137,10 +204,11 @@ class Torrent {
return ready_size == size;
}
TD_WARN_UNUSED_RESULT td::Status add_piece(td::Slice piece, td::uint64 offset) {
TD_WARN_UNUSED_RESULT td::Status write_piece(td::Slice piece, td::uint64 offset) {
TRY_RESULT(written, data.write(piece, offset));
CHECK(written == piece.size());
ready_size += written;
if (written != piece.size()) {
return td::Status::Error("Written less than expected");
}
return td::Status::OK();
}
bool has_piece(td::uint64 offset, td::uint64 size) {
@ -149,21 +217,24 @@ class Torrent {
TD_WARN_UNUSED_RESULT td::Status get_piece(td::MutableSlice dest, td::uint64 offset, Cache *cache = nullptr);
};
std::vector<ChunkState> chunks_;
td::uint64 included_size_{0};
td::uint64 included_ready_size_{0};
explicit Torrent(Info info, td::optional<TorrentHeader> header, ton::MerkleTree tree, std::vector<ChunkState> chunk);
explicit Torrent(TorrentMeta meta);
explicit Torrent(td::Bits256 hash);
explicit Torrent(Info info, td::optional<TorrentHeader> header, ton::MerkleTree tree, std::vector<ChunkState> chunk,
std::string root_dir);
void set_root_dir(std::string root_dir) {
root_dir_ = std::move(root_dir);
}
std::string get_chunk_path(td::Slice name);
std::string get_chunk_path(td::Slice name) const;
td::Status init_chunk_data(ChunkState &chunk);
template <class F>
td::Status iterate_piece(Info::PieceInfo piece, F &&f);
void add_pending_pieces();
td::Status add_header_piece(td::uint64 piece_i, td::Slice data);
td::Status add_pending_piece(td::uint64 piece_i, td::Slice data);
td::Status add_validated_piece(td::uint64 piece_i, td::Slice data);
void set_header(const TorrentHeader &header);
};
} // namespace ton
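A hedged sketch (not part of the diff) of the download-by-hash flow this interface enables: a Torrent is created from a bag-of-files hash alone, and its info, header and file data are filled in later as they arrive from peers. The include path and wrapper name are assumptions.
#include "storage/Torrent.h"  // assumed include path

td::Result<ton::Torrent> open_by_hash(td::Bits256 hash, std::string root_dir) {
  ton::Torrent::Options options;
  options.root_dir = std::move(root_dir);
  options.in_memory = false;
  TRY_RESULT(torrent, ton::Torrent::open(options, hash));
  // Nothing is known about the torrent yet:
  CHECK(!torrent.inited_info());
  // Later: init_info() once storage.getTorrentInfo returns, then add_piece() for header and
  // data pieces, and enable_write_to_files() when the client decides what to download.
  return std::move(torrent);
}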

View file

@ -25,11 +25,20 @@
#include "td/utils/PathView.h"
#include "td/utils/port/path.h"
#include "td/utils/tl_helpers.h"
#include "MicrochunkTree.h"
#include "TorrentHeader.hpp"
namespace ton {
td::Result<Torrent> Torrent::Creator::create_from_path(Options options, td::CSlice raw_path) {
TRY_RESULT(path, td::realpath(raw_path));
TRY_RESULT(stat, td::stat(path));
std::string root_dir = path;
while (!root_dir.empty() && root_dir.back() == TD_DIR_SLASH) {
root_dir.pop_back();
}
while (!root_dir.empty() && root_dir.back() != TD_DIR_SLASH) {
root_dir.pop_back();
}
if (stat.is_dir_) {
if (!path.empty() && path.back() != TD_DIR_SLASH) {
path += TD_DIR_SLASH;
@ -50,17 +59,21 @@ td::Result<Torrent> Torrent::Creator::create_from_path(Options options, td::CSli
});
TRY_STATUS(std::move(status));
TRY_STATUS(std::move(walk_status));
creator.root_dir_ = std::move(root_dir);
std::sort(creator.files_.begin(), creator.files_.end(),
[](const Torrent::Creator::File& a, const Torrent::Creator::File& b) { return a.name < b.name; });
return creator.finalize();
} else {
Torrent::Creator creator(options);
TRY_STATUS(creator.add_file(td::PathView(path).file_name(), path));
creator.root_dir_ = std::move(root_dir);
return creator.finalize();
}
}
td::Result<Torrent> Torrent::Creator::create_from_blobs(Options options, td::Span<Blob> blobs) {
Torrent::Creator creator(options);
for (auto &blob : blobs) {
for (auto& blob : blobs) {
TRY_STATUS(creator.add_blob(blob.name, blob.data));
}
return creator.finalize();
@ -79,7 +92,7 @@ td::Status Torrent::Creator::add_blob(td::Slice name, td::BlobView blob) {
}
TD_WARN_UNUSED_RESULT td::Status Torrent::Creator::add_file(td::Slice name, td::CSlice path) {
LOG(INFO) << "Add file " << name << " " << path;
LOG(DEBUG) << "Add file " << name << " " << path;
TRY_RESULT(data, td::FileNoCacheBlobView::create(path));
return add_blob(name, std::move(data));
}
@ -115,11 +128,9 @@ td::Result<Torrent> Torrent::Creator::finalize() {
auto header_size = header.serialization_size();
auto file_size = header_size + data_offset;
auto chunks_count = (file_size + options_.piece_size - 1) / options_.piece_size;
ton::MerkleTree tree;
tree.init_begin(chunks_count);
auto pieces_count = (file_size + options_.piece_size - 1) / options_.piece_size;
std::vector<Torrent::ChunkState> chunks;
size_t chunk_i = 0;
std::vector<td::Bits256> pieces;
auto flush_reader = [&](bool force) {
while (true) {
auto slice = reader.prepare_read();
@ -127,16 +138,14 @@ td::Result<Torrent> Torrent::Creator::finalize() {
if (slice.empty() || (slice.size() != options_.piece_size && !force)) {
break;
}
td::UInt256 hash;
td::Bits256 hash;
sha256(slice, hash.as_slice());
CHECK(chunk_i < chunks_count);
tree.init_add_chunk(chunk_i, hash.as_slice());
chunk_i++;
pieces.push_back(hash);
reader.confirm_read(slice.size());
}
};
td::uint64 offset = 0;
auto add_blob = [&](auto &&data, td::Slice name) {
auto add_blob = [&](auto data, td::Slice name) {
td::uint64 data_offset = 0;
while (data_offset < data.size()) {
auto dest = writer.prepare_write();
@ -168,24 +177,25 @@ td::Result<Torrent> Torrent::Creator::finalize() {
td::sha256(header_str, info.header_hash.as_slice());
add_blob(td::BufferSliceBlobView::create(td::BufferSlice(header_str)), "").ensure();
for (auto &file : files_) {
for (auto& file : files_) {
add_blob(std::move(file.data), file.name).ensure();
}
flush_reader(true);
tree.init_finish();
CHECK(chunk_i == chunks_count);
CHECK(pieces.size() == pieces_count);
CHECK(offset == file_size);
MerkleTree tree(std::move(pieces));
info.header_size = header.serialization_size();
info.piece_size = options_.piece_size;
info.description = options_.description;
info.file_size = file_size;
info.depth = tree.get_depth();
info.root_hash = tree.get_root_hash();
info.init_cell();
TRY_STATUS_PREFIX(info.validate(), "Invalid torrent info: ");
TRY_STATUS_PREFIX(header.validate(info.file_size, info.header_size), "Invalid torrent header: ");
Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks));
Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks), root_dir_);
return std::move(torrent);
}

View file

@ -55,13 +55,12 @@ class Torrent::Creator {
td::Result<Torrent> finalize();
private:
td::Status init();
Options options_;
struct File {
std::string name;
td::BlobView data;
};
std::vector<File> files_;
std::string root_dir_;
};
} // namespace ton

View file

@ -66,4 +66,76 @@ td::Slice TorrentHeader::get_name(td::uint64 file_i) const {
return td::Slice(names).substr(from, till - from);
}
static td::Status validate_name(td::Slice name, bool is_dir_name = false) {
if (name.empty()) {
return td::Status::Error("Name can't be empty");
}
if (name[0] == '/') {
return td::Status::Error("Name can't start with '/'");
}
if (name.back() == '/' && !is_dir_name) {
return td::Status::Error("Name can't end with '/'");
}
for (size_t l = 0; l < name.size();) {
size_t r = l + 1;
while (r < name.size() && name[r] != '/') {
++r;
}
td::Slice s = name.substr(l, r - l);
if (s == "") {
return td::Status::Error("Name can't contain consequitive '/'");
}
if (s == ".") {
return td::Status::Error("Name can't contain component \".\"");
}
if (s == "..") {
return td::Status::Error("Name can't contain component \"..\"");
}
l = r + 1;
}
return td::Status::OK();
}
td::Status TorrentHeader::validate(td::uint64 total_size, td::uint64 header_size) const {
if (serialization_size() != header_size) {
return td::Status::Error("Invalid size");
}
for (size_t i = 0; i + 1 < files_count; ++i) {
if (name_index[i] > name_index[i + 1]) {
return td::Status::Error("Invalid name offset");
}
}
if (name_index.back() != names.size()) {
return td::Status::Error("Invalid name offset");
}
for (size_t i = 0; i < files_count; ++i) {
if (get_data_offset(i) > get_data_offset(i + 1)) {
return td::Status::Error("Invalid data offset");
}
}
if (get_data_offset(files_count) != total_size) {
return td::Status::Error("Invalid data offset");
}
std::set<std::string> names;
for (size_t i = 0; i < files_count; ++i) {
auto name = get_name(i);
TRY_STATUS_PREFIX(validate_name(name), PSTRING() << "Invalid filename " << name << ": ");
if (!names.insert(name.str()).second) {
return td::Status::Error(PSTRING() << "Duplicate filename " << name);
}
}
if (!dir_name.empty()) {
TRY_STATUS_PREFIX(validate_name(dir_name, true), "Invalid dir_name: ");
}
for (const std::string& name : names) {
std::string name1 = name + '/';
auto it = names.lower_bound(name1);
if (it != names.end() && it->substr(0, name1.size()) == name1) {
return td::Status::Error(PSTRING() << "Filename " << name << " coincides with directory name");
}
}
return td::Status::OK();
}
} // namespace ton

View file

@ -21,6 +21,7 @@
#include "td/utils/Slice.h"
#include "td/utils/buffer.h"
#include "td/utils/Status.h"
namespace ton {
// fec_info_none#c82a1964 = FecInfo;
@ -37,6 +38,13 @@ namespace ton {
// names:(file_names_size * [uint8])
// data:(tot_data_size * [uint8])
// = TorrentHeader;
//
// Filename rules:
// 1) Name can't be empty
// 2) Names in a torrent should be unique
// 3) Name can't start or end with '/' or contain two consecutive '/'
// 4) Components of name can't be equal to "." or ".."
// 5) If there's a name aaa/bbb/ccc, no other name can start with aaa/bbb/ccc/
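// Examples (illustrative): "readme.txt" and "dir/sub/a.bin" are valid;
// "/a.bin", "dir//a.bin" and "dir/../a.bin" are rejected by rules 3 and 4,
// and "a.bin" cannot coexist with "a.bin/x" because of rule 5.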
struct TorrentHeader {
td::uint32 files_count{0};
@ -64,5 +72,7 @@ struct TorrentHeader {
void store(StorerT &storer) const;
template <class ParserT>
void parse(ParserT &parser);
td::Status validate(td::uint64 total_size, td::uint64 header_size) const;
};
} // namespace ton

View file

@ -53,7 +53,7 @@ void TorrentHeader::parse(ParserT &parser) {
td::uint32 got_type;
parse(got_type, parser);
if (got_type != type) {
parser.set_error("Unknown fec type");
parser.set_error("Unknown type");
return;
}
parse(files_count, parser);

View file

@ -26,19 +26,18 @@
namespace ton {
bool TorrentInfo::pack(vm::CellBuilder &cb) const {
return cb.store_long_bool(depth, 32) && cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) &&
cb.store_bits_bool(root_hash) && cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) &&
return cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) && cb.store_bits_bool(root_hash) &&
cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) &&
vm::CellText::store(cb, description).is_ok();
}
bool TorrentInfo::unpack(vm::CellSlice &cs) {
return cs.fetch_uint_to(32, depth) && cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) &&
cs.fetch_bits_to(root_hash) && cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) &&
vm::CellText::fetch_to(cs, description);
return cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) && cs.fetch_bits_to(root_hash) &&
cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) && vm::CellText::fetch_to(cs, description);
}
vm::Cell::Hash TorrentInfo::get_hash() const {
return as_cell()->get_hash();
td::Bits256 TorrentInfo::get_hash() const {
return as_cell()->get_hash().bits();
}
void TorrentInfo::init_cell() {
@ -48,7 +47,7 @@ void TorrentInfo::init_cell() {
}
td::Ref<vm::Cell> TorrentInfo::as_cell() const {
CHECK(cell_.not_null());
CHECK(cell_.not_null())
return cell_;
}
@ -63,4 +62,17 @@ TorrentInfo::PieceInfo TorrentInfo::get_piece_info(td::uint64 piece_i) const {
info.size = td::min(static_cast<td::uint64>(piece_size), file_size - info.offset);
return info;
}
td::Status TorrentInfo::validate() const {
if (piece_size == 0) {
return td::Status::Error("Piece size is 0");
}
if (header_size > file_size) {
return td::Status::Error("Header is too big");
}
if (description.size() > 1024) {
return td::Status::Error("Description is too long");
}
return td::Status::OK();
}
} // namespace ton

View file

@ -23,11 +23,12 @@
#include "td/utils/UInt.h"
#include "vm/cells.h"
#include "td/utils/optional.h"
namespace ton {
// torrent_info depth:# piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) description:Text = TorrentInfo;
// torrent_info piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256)
// description:Text = TorrentInfo;
struct TorrentInfo {
td::uint32 depth{0};
td::uint32 piece_size{768 * 128};
td::uint64 file_size{0};
td::Bits256 root_hash;
@ -39,7 +40,7 @@ struct TorrentInfo {
bool unpack(vm::CellSlice &cs);
void init_cell();
vm::Cell::Hash get_hash() const;
td::Bits256 get_hash() const;
td::Ref<vm::Cell> as_cell() const;
struct PieceInfo {
@ -50,6 +51,8 @@ struct TorrentInfo {
td::uint64 pieces_count() const;
PieceInfo get_piece_info(td::uint64 piece_i) const;
td::Status validate() const;
private:
td::Ref<vm::Cell> cell_;
};

View file

@ -40,8 +40,6 @@ td::Result<TorrentMeta> TorrentMeta::deserialize(td::Slice data) {
if (header_hash != res.info.header_hash) {
return td::Status::Error("Header hash mismatch");
}
} else {
LOG(ERROR) << "NO HEADER";
}
if (res.root_proof.not_null()) {
auto root = vm::MerkleProof::virtualize(res.root_proof, 1);

57
storage/db.h Normal file
View file

@ -0,0 +1,57 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "td/db/KeyValueAsync.h"
#include "tl-utils/common-utils.hpp"
namespace db {
using DbType = td::KeyValueAsync<td::Bits256, td::BufferSlice>;
template <typename T>
inline void db_get(DbType& db, td::Bits256 key, bool allow_not_found,
td::Promise<ton::tl_object_ptr<T>> promise) {
db.get(key, [allow_not_found, promise = std::move(promise)](td::Result<DbType::GetResult> R) mutable {
TRY_RESULT_PROMISE(promise, r, std::move(R));
if (r.status == td::KeyValueReader::GetStatus::NotFound) {
if (allow_not_found) {
promise.set_value(nullptr);
} else {
promise.set_error(td::Status::Error("Key not found"));
}
return;
}
promise.set_result(ton::fetch_tl_object<T>(r.value, true));
});
}
template <typename T>
inline td::Result<ton::tl_object_ptr<T>> db_get(td::KeyValue& db, td::Bits256 key, bool allow_not_found) {
std::string value;
TRY_RESULT(r, db.get(key.as_slice(), value));
if (r == td::KeyValue::GetStatus::NotFound) {
if (allow_not_found) {
return nullptr;
} else {
return td::Status::Error("Key not found");
}
}
return ton::fetch_tl_object<T>(td::Slice(value), true);
}
} // namespace db
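A minimal usage sketch for the synchronous helper, assuming a RocksDB-backed td::KeyValue and the TL types used elsewhere in this commit (storage_db_key_torrentList / storage_db_torrentList, with the ton namespace as used in db.h); db_root is a placeholder std::string. With allow_not_found set to true, a missing key yields a null object instead of an error:
// Sketch only: read an optional TL object from the torrent database.
td::RocksDb kv = td::RocksDb::open(db_root + "/torrent-db").move_as_ok();
auto key = ton::create_hash_tl_object<ton::ton_api::storage_db_key_torrentList>();
auto r_list = db::db_get<ton::ton_api::storage_db_torrentList>(kv, key, true);
if (r_list.is_ok() && r_list.ok() != nullptr) {
  // the stored torrent hashes are in r_list.ok()->torrents_
}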

View file

@ -23,8 +23,6 @@
#include "dht/dht.h"
#include "keys/encryptor.h"
#include "overlay/overlay.h"
#include "rldp/rldp.h"
#include "rldp2/rldp.h"
#include "td/utils/JsonBuilder.h"
#include "td/utils/port/signals.h"
@ -37,13 +35,12 @@
#include "td/utils/filesystem.h"
#include "td/utils/port/path.h"
#include "td/actor/actor.h"
#include "td/actor/MultiPromise.h"
#include "terminal/terminal.h"
#include "Torrent.h"
#include "TorrentCreator.h"
#include "NodeActor.h"
#include "PeerManager.h"
#include "auto/tl/ton_api_json.h"
@ -53,8 +50,6 @@
#include <set>
#include "git.h"
namespace ton_rldp = ton::rldp2;
struct StorageCliOptions {
std::string config;
bool enable_readline{true};
@ -67,199 +62,6 @@ struct StorageCliOptions {
using AdnlCategory = td::int32;
class PeerManager : public td::actor::Actor {
public:
PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id,
td::actor::ActorId<ton::overlay::Overlays> overlays, td::actor::ActorId<ton::adnl::Adnl> adnl,
td::actor::ActorId<ton_rldp::Rldp> rldp)
: adnl_id_(std::move(adnl_id))
, overlay_id_(std::move(overlay_id))
, overlays_(std::move(overlays))
, adnl_(std::move(adnl))
, rldp_(std::move(rldp)) {
CHECK(register_adnl_id(adnl_id_) == 1);
}
void start_up() override {
// TODO: forbid broadcasts?
auto rules = ton::overlay::OverlayPrivacyRules{ton::overlay::Overlays::max_fec_broadcast_size()};
class Callback : public ton::overlay::Overlays::Callback {
public:
void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id,
td::BufferSlice data) override {
}
void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) override {
}
void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id,
td::BufferSlice data) override {
}
};
send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay, adnl_id_, overlay_id_.clone(),
std::make_unique<Callback>(), rules, "{ \"type\": \"storage\" }");
}
void tear_down() override {
send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id_, overlay_id_.compute_short_id());
}
void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise<td::BufferSlice> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
TRY_RESULT_PROMISE(promise, dst_id, peer_to_andl(dst));
query = ton::create_serialize_tl_object_suffix<ton::ton_api::storage_queryPrefix>(
std::move(query), overlay_id_.compute_short_id().bits256_value());
send_closure(rldp_, &ton_rldp::Rldp::send_query_ex, src_id, dst_id, "", std::move(promise), td::Timestamp::in(10),
std::move(query), 1 << 25);
}
void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) {
data = data.from_slice(data.as_slice().substr(4 + 32));
auto src_id = register_adnl_id(src);
auto dst_id = register_adnl_id(dst);
auto it = peers_.find(std::make_pair(dst_id, src_id));
if (it == peers_.end()) {
auto node_it = nodes_.find(dst_id);
if (node_it == nodes_.end()) {
LOG(ERROR) << "Unknown query destination";
promise.set_error(td::Status::Error("Unknown query destination"));
return;
}
if (!node_it->second.is_alive()) {
LOG(ERROR) << "Expired query destination";
promise.set_error(td::Status::Error("Unknown query destination"));
return;
}
send_closure(node_it->second, &ton::NodeActor::start_peer, src_id,
[promise = std::move(promise),
data = std::move(data)](td::Result<td::actor::ActorId<ton::PeerActor>> r_peer) mutable {
TRY_RESULT_PROMISE(promise, peer, std::move(r_peer));
send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise));
});
return;
}
send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise));
}
void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId<ton::PeerActor> peer) {
peers_[std::make_pair(src, dst)] = std::move(peer);
register_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void register_node(ton::PeerId src, td::actor::ActorId<ton::NodeActor> node) {
nodes_[src] = std::move(node);
register_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void unregister_node(ton::PeerId src, td::actor::ActorId<ton::NodeActor> node) {
auto it = nodes_.find(src);
CHECK(it != nodes_.end());
if (it->second == node) {
nodes_.erase(it);
}
unregister_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId<ton::PeerActor> peer) {
auto it = peers_.find(std::make_pair(src, dst));
CHECK(it != peers_.end());
if (it->second == peer) {
peers_.erase(it);
}
unregister_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
}
void unregister_src(ton::PeerId src, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
if (--subscribed_peers_[src] == 0) {
LOG(ERROR) << "Unsubscribe " << src_id;
subscribed_peers_.erase(src);
send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, src_id,
ton::create_serialize_tl_object<ton::ton_api::storage_queryPrefix>(
overlay_id_.compute_short_id().bits256_value())
.as_slice()
.str());
}
promise.set_value({});
}
void register_src(ton::PeerId src, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
class Callback : public ton::adnl::Adnl::Callback {
public:
Callback(td::actor::ActorId<PeerManager> peer_manager) : peer_manager_(std::move(peer_manager)) {
}
void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst,
td::BufferSlice data) override {
}
void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) override {
send_closure(peer_manager_, &PeerManager::execute_query, std::move(src), std::move(dst), std::move(data),
std::move(promise));
}
private:
td::actor::ActorId<PeerManager> peer_manager_;
};
if (subscribed_peers_[src]++ == 0) {
LOG(ERROR) << "Subscribe " << src_id;
send_closure(adnl_, &ton::adnl::Adnl::subscribe, src_id,
ton::create_serialize_tl_object<ton::ton_api::storage_queryPrefix>(
overlay_id_.compute_short_id().bits256_value())
.as_slice()
.str(),
std::make_unique<Callback>(actor_id(this)));
}
promise.set_value({});
}
td::Result<ton::adnl::AdnlNodeIdShort> peer_to_andl(ton::PeerId id) {
if (id <= 0 || id > adnl_ids_.size()) {
return td::Status::Error(PSLICE() << "Invalid peer id " << id);
}
return adnl_ids_[id - 1];
}
ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) {
auto it = adnl_to_peer_id_.emplace(id, next_peer_id_);
if (it.second) {
LOG(ERROR) << "Register AndlId " << id << " -> " << it.first->second;
adnl_ids_.push_back(id);
next_peer_id_++;
}
return it.first->second;
}
void get_peers(td::Promise<std::vector<ton::PeerId>> promise) {
send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, adnl_id_, overlay_id_.compute_short_id(),
30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers));
}
private:
ton::adnl::AdnlNodeIdShort adnl_id_;
ton::overlay::OverlayIdFull overlay_id_;
td::actor::ActorId<ton::overlay::Overlays> overlays_;
td::actor::ActorId<ton::adnl::Adnl> adnl_;
td::actor::ActorId<ton_rldp::Rldp> rldp_;
std::map<std::pair<ton::PeerId, ton::PeerId>, td::actor::ActorId<ton::PeerActor>> peers_;
std::map<ton::PeerId, td::actor::ActorId<ton::NodeActor>> nodes_;
ton::PeerId next_peer_id_{1};
std::map<ton::adnl::AdnlNodeIdShort, ton::PeerId> adnl_to_peer_id_;
std::vector<ton::adnl::AdnlNodeIdShort> adnl_ids_;
std::map<ton::PeerId, td::uint32> subscribed_peers_;
void got_overlay_random_peers(td::Result<std::vector<ton::adnl::AdnlNodeIdShort>> r_peers,
td::Promise<std::vector<ton::PeerId>> promise) {
TRY_RESULT_PROMISE(promise, peers, std::move(r_peers));
std::vector<ton::PeerId> res;
for (auto peer : peers) {
res.push_back(register_adnl_id(peer));
}
promise.set_value(std::move(res));
}
};
class StorageCli : public td::actor::Actor {
public:
explicit StorageCli(StorageCliOptions options) : options_(std::move(options)) {
@ -338,7 +140,8 @@ class StorageCli : public td::actor::Actor {
td::mkdir(options_.db_root).ignore();
keyring_ = ton::keyring::Keyring::create(options_.db_root + "/keyring");
adnl_network_manager_ = ton::adnl::AdnlNetworkManager::create(td::narrow_cast<td::int16>(options_.addr.get_port()));
adnl_network_manager_ =
ton::adnl::AdnlNetworkManager::create(td::narrow_cast<td::uint16>(options_.addr.get_port()));
adnl_ = ton::adnl::Adnl::create(options_.db_root, keyring_.get());
td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, adnl_network_manager_.get());
rldp_ = ton_rldp::Rldp::create(adnl_.get());
@ -430,6 +233,7 @@ class StorageCli : public td::actor::Actor {
td::TerminalIO::out() << "create <dir/file>\tCreate torrent from a directory\n";
td::TerminalIO::out() << "info <id>\tPrint info about loaded torrent\n";
td::TerminalIO::out() << "load <file>\tLoad torrent file in memory\n";
td::TerminalIO::out() << "addhash <hash>\tAdd torrent by hash (in hex)\n";
td::TerminalIO::out() << "save <id> <file>\tSave torrent file\n";
td::TerminalIO::out() << "start <id>\tStart torrent downloading/uploading\n";
td::TerminalIO::out() << "seed <id>\tStart torrent uploading\n";
@ -451,6 +255,8 @@ class StorageCli : public td::actor::Actor {
torrent_info(parser.read_all(), std::move(cmd_promise));
} else if (cmd == "load") {
cmd_promise.set_result(torrent_load(parser.read_all()).move_map([](auto &&x) { return td::Unit(); }));
} else if (cmd == "addhash") {
cmd_promise.set_result(torrent_add_by_hash(parser.read_all()).move_map([](auto &&x) { return td::Unit(); }));
} else if (cmd == "save") {
auto id = parser.read_word();
parser.skip_whitespaces();
@ -544,7 +350,7 @@ class StorageCli : public td::actor::Actor {
ton::Torrent::Creator::Options options;
options.piece_size = 128 * 1024;
TRY_RESULT_PROMISE(promise, torrent, ton::Torrent::Creator::create_from_path(options, path));
auto hash = torrent.get_info().header_hash;
auto hash = torrent.get_hash();
for (auto &it : infos_) {
if (it.second.hash == hash) {
promise.set_error(td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")"));
@ -552,6 +358,7 @@ class StorageCli : public td::actor::Actor {
}
}
td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n";
td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n";
infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn<PeerManager>(),
td::actor::ActorOwn<ton::NodeActor>()});
torrent_id_++;
@ -600,12 +407,12 @@ class StorageCli : public td::actor::Actor {
}
}
td::actor::ActorOwn<PeerManager> create_peer_manager(vm::Cell::Hash hash) {
td::actor::ActorOwn<PeerManager> create_peer_manager(td::Bits256 hash) {
// create overlay network
td::BufferSlice hash_str(hash.as_slice());
ton::overlay::OverlayIdFull overlay_id(std::move(hash_str));
auto adnl_id = ton::adnl::AdnlNodeIdShort{public_key_.compute_short_id()};
return td::actor::create_actor<PeerManager>("PeerManager", adnl_id, std::move(overlay_id), overlays_.get(),
return td::actor::create_actor<PeerManager>("PeerManager", adnl_id, std::move(overlay_id), false, overlays_.get(),
adnl_.get(), rldp_.get());
}
@ -616,68 +423,17 @@ class StorageCli : public td::actor::Actor {
return;
}
if (ptr->peer_manager.empty()) {
ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_info().get_hash());
ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_hash());
}
ton::PeerId self_id = 1;
class Context : public ton::NodeActor::Callback {
class Callback : public ton::NodeActor::Callback {
public:
Context(td::actor::ActorId<PeerManager> peer_manager, td::actor::ActorId<StorageCli> storage_cli,
ton::PeerId self_id, td::uint32 torrent_id, td::Promise<td::Unit> on_completed)
: peer_manager_(peer_manager)
, storage_cli_(std::move(storage_cli))
, self_id_(self_id)
Callback(td::actor::ActorId<StorageCli> storage_cli, td::uint32 torrent_id, td::Promise<td::Unit> on_completed)
: storage_cli_(std::move(storage_cli))
, torrent_id_(std::move(torrent_id))
, on_completed_(std::move(on_completed)) {
}
void get_peers(td::Promise<std::vector<ton::PeerId>> promise) override {
send_closure(peer_manager_, &PeerManager::get_peers, std::move(promise));
}
void register_self(td::actor::ActorId<ton::NodeActor> self) override {
CHECK(self_.empty());
self_ = self;
send_closure(peer_manager_, &PeerManager::register_node, self_id_, self_);
}
~Context() {
if (!self_.empty()) {
send_closure(peer_manager_, &PeerManager::unregister_node, self_id_, self_);
}
}
td::actor::ActorOwn<ton::PeerActor> create_peer(ton::PeerId self_id, ton::PeerId peer_id,
td::SharedState<ton::PeerState> state) override {
CHECK(self_id == self_id_);
class PeerCallback : public ton::PeerActor::Callback {
public:
PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId<PeerManager> peer_manager)
: self_id_(self_id), peer_id_(peer_id), peer_manager_(std::move(peer_manager)) {
}
void register_self(td::actor::ActorId<ton::PeerActor> self) override {
CHECK(self_.empty());
self_ = std::move(self);
send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_);
}
void send_query(td::uint64 query_id, td::BufferSlice query) override {
send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query),
promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id));
}
~PeerCallback() {
if (!self_.empty()) {
send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_);
}
}
private:
td::actor::ActorId<ton::PeerActor> self_;
ton::PeerId self_id_;
ton::PeerId peer_id_;
td::actor::ActorId<PeerManager> peer_manager_;
};
return td::actor::create_actor<ton::PeerActor>(PSLICE() << "ton::PeerActor " << self_id << "->" << peer_id,
td::make_unique<PeerCallback>(self_id, peer_id, peer_manager_),
std::move(state));
}
void on_completed() override {
if (on_completed_) {
@ -691,23 +447,20 @@ class StorageCli : public td::actor::Actor {
}
private:
td::actor::ActorId<PeerManager> peer_manager_;
td::actor::ActorId<StorageCli> storage_cli_;
ton::PeerId self_id_;
td::uint32 torrent_id_;
std::vector<ton::PeerId> peers_;
td::Promise<td::Unit> on_completed_;
td::actor::ActorId<ton::NodeActor> self_;
};
td::Promise<td::Unit> on_completed;
if (wait_download) {
on_completed = std::move(promise);
}
auto context =
td::make_unique<Context>(ptr->peer_manager.get(), actor_id(this), self_id, ptr->id, std::move(on_completed));
ptr->node = td::actor::create_actor<ton::NodeActor>(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(),
std::move(context), should_download);
auto callback = td::make_unique<Callback>(actor_id(this), ptr->id, std::move(on_completed));
auto context = PeerManager::create_callback(ptr->peer_manager.get());
ptr->node =
td::actor::create_actor<ton::NodeActor>(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(),
std::move(callback), std::move(context), nullptr, should_download);
td::TerminalIO::out() << "Torrent #" << ptr->id << " started\n";
promise.release().release();
if (promise) {
@ -748,8 +501,11 @@ class StorageCli : public td::actor::Actor {
return;
}
auto file_id_str = parser.read_word();
size_t file_id = std::numeric_limits<size_t>::max();
if (file_id_str != "*") {
size_t file_id = 0;
bool all = false;
if (file_id_str == "*") {
all = true;
} else {
TRY_RESULT_PROMISE_ASSIGN(promise, file_id, td::to_integer_safe<std::size_t>(file_id_str));
}
TRY_RESULT_PROMISE(promise, priority, td::to_integer_safe<td::uint8>(parser.read_word()));
@ -757,7 +513,13 @@ class StorageCli : public td::actor::Actor {
promise.set_error(td::Status::Error("Priority = 255 is reserved"));
return;
}
send_closure(ptr->node, &ton::NodeActor::set_file_priority, file_id, priority);
if (all) {
send_closure(ptr->node, &ton::NodeActor::set_all_files_priority, priority,
promise.wrap([](bool) { return td::Unit(); }));
} else {
send_closure(ptr->node, &ton::NodeActor::set_file_priority_by_idx, file_id, priority,
promise.wrap([](bool) { return td::Unit(); }));
}
promise.set_value(td::Unit());
}
@ -779,13 +541,40 @@ class StorageCli : public td::actor::Actor {
TRY_RESULT(torrent, ton::Torrent::open(options, data));
auto hash = torrent.get_info().header_hash;
auto hash = torrent.get_hash();
for (auto &it : infos_) {
if (it.second.hash == hash) {
return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")");
}
}
td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n";
td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n";
auto res =
infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn<PeerManager>(),
td::actor::ActorOwn<ton::NodeActor>()});
torrent_id_++;
return &res.first->second;
}
td::Result<Info *> torrent_add_by_hash(td::Slice hash_hex) {
td::Bits256 hash;
if (hash.from_hex(hash_hex) != 256) {
return td::Status::Error("Failed to parse torrent hash");
}
ton::Torrent::Options options;
options.in_memory = false;
options.root_dir = ".";
options.validate = false;
TRY_RESULT(torrent, ton::Torrent::open(options, hash));
for (auto &it : infos_) {
if (it.second.hash == hash) {
return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")");
}
}
td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n";
td::TerminalIO::out() << "Torrent hash: " << torrent.get_hash().to_hex() << "\n";
auto res =
infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn<PeerManager>(),
td::actor::ActorOwn<ton::NodeActor>()});
@ -839,7 +628,8 @@ int main(int argc, char *argv[]) {
return (verbosity >= 0 && verbosity <= 20) ? td::Status::OK() : td::Status::Error("verbosity must be 0..20");
});
p.add_option('V', "version", "shows storage-cli build information", [&]() {
std::cout << "storage-cli build information: [ Commit: " << GitMetadata::CommitSHA1() << ", Date: " << GitMetadata::CommitDate() << "]\n";
std::cout << "storage-cli build information: [ Commit: " << GitMetadata::CommitSHA1()
<< ", Date: " << GitMetadata::CommitDate() << "]\n";
std::exit(0);
});
p.add_option('C', "config", "set ton config", [&](td::Slice arg) { options.config = arg.str(); });
@ -859,7 +649,7 @@ int main(int argc, char *argv[]) {
std::_Exit(2);
}
td::actor::Scheduler scheduler({0});
td::actor::Scheduler scheduler({3});
scheduler.run_in_context([&] { td::actor::create_actor<StorageCli>("console", options).release(); });
scheduler.run();
return 0;

View file

@ -0,0 +1,32 @@
cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)
add_executable(embed-provider-code smartcont/embed-provider-code.cpp)
add_custom_command(
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMAND embed-provider-code smartcont/storage-provider-code.boc smartcont/provider-code.h
COMMENT "Generate provider-code.h"
DEPENDS embed-provider-code smartcont/storage-provider-code.boc
OUTPUT smartcont/provider-code.h
)
set(STORAGE_DAEMON_SOURCE
storage-daemon.cpp
StorageManager.h
StorageManager.cpp
StorageProvider.h
StorageProvider.cpp
smc-util.h
smc-util.cpp
smartcont/provider-code.h)
set(STORAGE_DAEMON_CLI_SOURCE
storage-daemon-cli.cpp
)
add_executable(storage-daemon ${STORAGE_DAEMON_SOURCE})
target_link_libraries(storage-daemon storage overlay tdutils tdactor adnl tl_api dht
rldp rldp2 fift-lib memprof git tonlib)
add_executable(storage-daemon-cli ${STORAGE_DAEMON_CLI_SOURCE})
target_link_libraries(storage-daemon-cli tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_block terminal git)

View file

@ -0,0 +1,268 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "StorageManager.h"
#include "td/utils/filesystem.h"
#include "td/utils/port/path.h"
#include "td/db/RocksDb.h"
#include "td/actor/MultiPromise.h"
static overlay::OverlayIdFull get_overlay_id(td::Bits256 hash) {
td::BufferSlice hash_str(hash.as_slice());
return overlay::OverlayIdFull(std::move(hash_str));
}
StorageManager::StorageManager(adnl::AdnlNodeIdShort local_id, std::string db_root, td::unique_ptr<Callback> callback,
bool client_mode, td::actor::ActorId<adnl::Adnl> adnl,
td::actor::ActorId<ton_rldp::Rldp> rldp, td::actor::ActorId<overlay::Overlays> overlays)
: local_id_(local_id)
, db_root_(std::move(db_root))
, callback_(std::move(callback))
, client_mode_(client_mode)
, adnl_(std::move(adnl))
, rldp_(std::move(rldp))
, overlays_(std::move(overlays)) {
}
void StorageManager::start_up() {
CHECK(db_root_ != "");
td::mkdir(db_root_).ensure();
db_root_ = td::realpath(db_root_).move_as_ok();
td::mkdir(db_root_ + "/torrent-db").ensure();
td::mkdir(db_root_ + "/torrent-files").ensure();
LOG(INFO) << "Starting Storage manager. DB = " << db_root_;
db_ = std::make_shared<db::DbType>(
std::make_shared<td::RocksDb>(td::RocksDb::open(db_root_ + "/torrent-db").move_as_ok()));
db::db_get<ton_api::storage_db_torrentList>(
*db_, create_hash_tl_object<ton_api::storage_db_key_torrentList>(), true,
[SelfId = actor_id(this)](td::Result<tl_object_ptr<ton_api::storage_db_torrentList>> R) {
std::vector<td::Bits256> torrents;
if (R.is_error()) {
LOG(ERROR) << "Failed to load torrent list from db: " << R.move_as_error();
} else {
auto r = R.move_as_ok();
if (r != nullptr) {
torrents = std::move(r->torrents_);
}
}
td::actor::send_closure(SelfId, &StorageManager::load_torrents_from_db, std::move(torrents));
});
}
void StorageManager::load_torrents_from_db(std::vector<td::Bits256> torrents) {
td::MultiPromise mp;
auto ig = mp.init_guard();
ig.add_promise([SelfId = actor_id(this)](td::Result<td::Unit> R) {
td::actor::send_closure(SelfId, &StorageManager::after_load_torrents_from_db);
});
for (auto hash : torrents) {
CHECK(!torrents_.count(hash))
auto& entry = torrents_[hash];
entry.peer_manager = td::actor::create_actor<PeerManager>("PeerManager", local_id_, get_overlay_id(hash),
client_mode_, overlays_, adnl_, rldp_);
NodeActor::load_from_db(
db_, hash, create_callback(hash, entry.closing_state), PeerManager::create_callback(entry.peer_manager.get()),
[SelfId = actor_id(this), hash,
promise = ig.get_promise()](td::Result<td::actor::ActorOwn<NodeActor>> R) mutable {
td::actor::send_closure(SelfId, &StorageManager::loaded_torrent_from_db, hash, std::move(R));
promise.set_result(td::Unit());
});
}
}
void StorageManager::loaded_torrent_from_db(td::Bits256 hash, td::Result<td::actor::ActorOwn<NodeActor>> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to load torrent " << hash.to_hex() << " from db: " << R.move_as_error();
torrents_.erase(hash);
} else {
auto it = torrents_.find(hash);
CHECK(it != torrents_.end());
it->second.actor = R.move_as_ok();
LOG(INFO) << "Loaded torrent " << hash.to_hex() << " from db";
}
}
void StorageManager::after_load_torrents_from_db() {
LOG(INFO) << "Finished loading torrents from db (" << torrents_.size() << " torrents)";
db_store_torrent_list();
callback_->on_ready();
}
td::unique_ptr<NodeActor::Callback> StorageManager::create_callback(
td::Bits256 hash, std::shared_ptr<TorrentEntry::ClosingState> closing_state) {
class Callback : public NodeActor::Callback {
public:
Callback(td::actor::ActorId<StorageManager> id, td::Bits256 hash,
std::shared_ptr<TorrentEntry::ClosingState> closing_state)
: id_(std::move(id)), hash_(hash), closing_state_(std::move(closing_state)) {
}
void on_completed() override {
}
void on_closed(Torrent torrent) override {
CHECK(torrent.get_hash() == hash_);
td::actor::send_closure(id_, &StorageManager::on_torrent_closed, std::move(torrent), closing_state_);
}
private:
td::actor::ActorId<StorageManager> id_;
td::Bits256 hash_;
std::shared_ptr<TorrentEntry::ClosingState> closing_state_;
};
return td::make_unique<Callback>(actor_id(this), hash, std::move(closing_state));
}
void StorageManager::add_torrent(Torrent torrent, bool start_download, td::Promise<td::Unit> promise) {
TRY_STATUS_PROMISE(promise, add_torrent_impl(std::move(torrent), start_download));
db_store_torrent_list();
promise.set_result(td::Unit());
}
td::Status StorageManager::add_torrent_impl(Torrent torrent, bool start_download) {
td::Bits256 hash = torrent.get_hash();
if (torrents_.count(hash)) {
return td::Status::Error("Cannot add torrent: duplicate hash");
}
TorrentEntry& entry = torrents_[hash];
entry.hash = hash;
entry.peer_manager = td::actor::create_actor<PeerManager>("PeerManager", local_id_, get_overlay_id(hash),
client_mode_, overlays_, adnl_, rldp_);
auto context = PeerManager::create_callback(entry.peer_manager.get());
LOG(INFO) << "Added torrent " << hash.to_hex() << " , root_dir = " << torrent.get_root_dir();
entry.actor =
td::actor::create_actor<NodeActor>("Node", 1, std::move(torrent), create_callback(hash, entry.closing_state),
std::move(context), db_, start_download);
return td::Status::OK();
}
void StorageManager::add_torrent_by_meta(TorrentMeta meta, std::string root_dir, bool start_download,
td::Promise<td::Unit> promise) {
td::Bits256 hash(meta.info.get_hash());
Torrent::Options options;
options.root_dir = root_dir.empty() ? db_root_ + "/torrent-files/" + hash.to_hex() : root_dir;
TRY_RESULT_PROMISE(promise, torrent, Torrent::open(std::move(options), std::move(meta)));
add_torrent(std::move(torrent), start_download, std::move(promise));
}
void StorageManager::add_torrent_by_hash(td::Bits256 hash, std::string root_dir, bool start_download,
td::Promise<td::Unit> promise) {
Torrent::Options options;
options.root_dir = root_dir.empty() ? db_root_ + "/torrent-files/" + hash.to_hex() : root_dir;
TRY_RESULT_PROMISE(promise, torrent, Torrent::open(std::move(options), hash));
add_torrent(std::move(torrent), start_download, std::move(promise));
}
void StorageManager::set_active_download(td::Bits256 hash, bool active, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::set_should_download, active);
promise.set_result(td::Unit());
}
void StorageManager::with_torrent(td::Bits256 hash, td::Promise<NodeActor::NodeState> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::with_torrent, std::move(promise));
}
void StorageManager::get_all_torrents(td::Promise<std::vector<td::Bits256>> promise) {
std::vector<td::Bits256> result;
for (const auto& p : torrents_) {
result.push_back(p.first);
}
promise.set_result(std::move(result));
}
void StorageManager::db_store_torrent_list() {
std::vector<td::Bits256> torrents;
for (const auto& p : torrents_) {
torrents.push_back(p.first);
}
db_->set(create_hash_tl_object<ton_api::storage_db_key_torrentList>(),
create_serialize_tl_object<ton_api::storage_db_torrentList>(std::move(torrents)),
[](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to save torrent list to db: " << R.move_as_error();
}
});
}
void StorageManager::set_all_files_priority(td::Bits256 hash, td::uint8 priority, td::Promise<bool> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::set_all_files_priority, priority, std::move(promise));
}
void StorageManager::set_file_priority_by_idx(td::Bits256 hash, size_t idx, td::uint8 priority,
td::Promise<bool> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::set_file_priority_by_idx, idx, priority, std::move(promise));
}
void StorageManager::set_file_priority_by_name(td::Bits256 hash, std::string name, td::uint8 priority,
td::Promise<bool> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::set_file_priority_by_name, std::move(name), priority,
std::move(promise));
}
void StorageManager::remove_torrent(td::Bits256 hash, bool remove_files, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
LOG(INFO) << "Removing torrent " << hash.to_hex();
entry->closing_state->removing = true;
entry->closing_state->remove_files = remove_files;
entry->closing_state->promise = std::move(promise);
torrents_.erase(hash);
db_store_torrent_list();
}
void StorageManager::load_from(td::Bits256 hash, td::optional<TorrentMeta> meta, std::string files_path,
td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::load_from, std::move(meta), std::move(files_path),
std::move(promise));
}
void StorageManager::on_torrent_closed(Torrent torrent, std::shared_ptr<TorrentEntry::ClosingState> closing_state) {
if (!closing_state->removing) {
return;
}
if (closing_state->remove_files && torrent.inited_header()) {
size_t files_count = torrent.get_files_count().unwrap();
for (size_t i = 0; i < files_count; ++i) {
std::string path = torrent.get_file_path(i);
td::unlink(path).ignore();
// TODO: Check errors, remove empty directories
}
}
td::rmrf(db_root_ + "/torrent-files/" + torrent.get_hash().to_hex()).ignore();
NodeActor::cleanup_db(db_, torrent.get_hash(),
[promise = std::move(closing_state->promise)](td::Result<td::Unit> R) mutable {
if (R.is_error()) {
LOG(ERROR) << "Failed to cleanup database: " << R.move_as_error();
}
promise.set_result(td::Unit());
});
}
void StorageManager::wait_for_completion(td::Bits256 hash, td::Promise<td::Unit> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::wait_for_completion, std::move(promise));
}
void StorageManager::get_peers_info(td::Bits256 hash,
td::Promise<tl_object_ptr<ton_api::storage_daemon_peerList>> promise) {
TRY_RESULT_PROMISE(promise, entry, get_torrent(hash));
td::actor::send_closure(entry->actor, &NodeActor::get_peers_info, std::move(promise));
}

View file

@ -0,0 +1,107 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "td/actor/actor.h"
#include "adnl/adnl.h"
#include "rldp2/rldp.h"
#include "overlay/overlays.h"
#include "storage/PeerManager.h"
#include "storage/db.h"
using namespace ton;
class StorageManager : public td::actor::Actor {
public:
class Callback {
public:
virtual ~Callback() = default;
virtual void on_ready() = 0;
};
StorageManager(adnl::AdnlNodeIdShort local_id, std::string db_root, td::unique_ptr<Callback> callback,
bool client_mode, td::actor::ActorId<adnl::Adnl> adnl, td::actor::ActorId<ton_rldp::Rldp> rldp,
td::actor::ActorId<overlay::Overlays> overlays);
void start_up() override;
void add_torrent(Torrent torrent, bool start_download, td::Promise<td::Unit> promise);
void add_torrent_by_meta(TorrentMeta meta, std::string root_dir, bool start_download, td::Promise<td::Unit> promise);
void add_torrent_by_hash(td::Bits256 hash, std::string root_dir, bool start_download, td::Promise<td::Unit> promise);
void set_active_download(td::Bits256 hash, bool active, td::Promise<td::Unit> promise);
void with_torrent(td::Bits256 hash, td::Promise<NodeActor::NodeState> promise);
void get_all_torrents(td::Promise<std::vector<td::Bits256>> promise);
void set_all_files_priority(td::Bits256 hash, td::uint8 priority, td::Promise<bool> promise);
void set_file_priority_by_idx(td::Bits256 hash, size_t idx, td::uint8 priority, td::Promise<bool> promise);
void set_file_priority_by_name(td::Bits256 hash, std::string name, td::uint8 priority, td::Promise<bool> promise);
void remove_torrent(td::Bits256 hash, bool remove_files, td::Promise<td::Unit> promise);
void load_from(td::Bits256 hash, td::optional<TorrentMeta> meta, std::string files_path,
td::Promise<td::Unit> promise);
void wait_for_completion(td::Bits256 hash, td::Promise<td::Unit> promise);
void get_peers_info(td::Bits256 hash, td::Promise<tl_object_ptr<ton_api::storage_daemon_peerList>> promise);
private:
adnl::AdnlNodeIdShort local_id_;
std::string db_root_;
td::unique_ptr<Callback> callback_;
bool client_mode_ = false;
td::actor::ActorId<adnl::Adnl> adnl_;
td::actor::ActorId<ton_rldp::Rldp> rldp_;
td::actor::ActorId<overlay::Overlays> overlays_;
std::shared_ptr<db::DbType> db_;
struct TorrentEntry {
td::Bits256 hash;
td::actor::ActorOwn<NodeActor> actor;
td::actor::ActorOwn<PeerManager> peer_manager;
struct ClosingState {
bool removing = false;
td::Promise<td::Unit> promise;
bool remove_files = false;
};
std::shared_ptr<ClosingState> closing_state = std::make_shared<ClosingState>();
};
std::map<td::Bits256, TorrentEntry> torrents_;
td::Status add_torrent_impl(Torrent torrent, bool start_download);
td::Result<TorrentEntry*> get_torrent(td::Bits256 hash) {
auto it = torrents_.find(hash);
if (it == torrents_.end()) {
return td::Status::Error("No such torrent");
}
return &it->second;
}
td::unique_ptr<NodeActor::Callback> create_callback(td::Bits256 hash,
std::shared_ptr<TorrentEntry::ClosingState> closing_state);
void load_torrents_from_db(std::vector<td::Bits256> torrents);
void loaded_torrent_from_db(td::Bits256 hash, td::Result<td::actor::ActorOwn<NodeActor>> R);
void after_load_torrents_from_db();
void db_store_torrent_list();
void on_torrent_closed(Torrent torrent, std::shared_ptr<TorrentEntry::ClosingState> closing_state);
};
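A minimal construction sketch (not from this commit), assuming already-created adnl, rldp2 and overlay actors and a running scheduler context; ReadyCallback, local_id, db_root and the actor handles are placeholders:
// Sketch: implement the readiness callback and spawn the StorageManager actor.
class ReadyCallback : public StorageManager::Callback {
 public:
  void on_ready() override {
    LOG(INFO) << "storage manager is ready";
  }
};
auto storage_manager = td::actor::create_actor<StorageManager>(
    "StorageManager", local_id, db_root, td::make_unique<ReadyCallback>(),
    /*client_mode=*/false, adnl.get(), rldp.get(), overlays.get());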

View file

@ -0,0 +1,840 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "StorageProvider.h"
#include "td/db/RocksDb.h"
#include "td/utils/JsonBuilder.h"
#include "auto/tl/ton_api_json.h"
#include "td/utils/port/path.h"
#include "block/block-auto.h"
#include "common/delay.h"
#include "td/actor/MultiPromise.h"
td::Result<ProviderParams> ProviderParams::create(const tl_object_ptr<ton_api::storage_daemon_provider_params>& obj) {
ProviderParams p;
p.accept_new_contracts = obj->accept_new_contracts_;
p.rate_per_mb_day = td::string_to_int256(obj->rate_per_mb_day_);
if (p.rate_per_mb_day.is_null() || p.rate_per_mb_day->sgn() < 0) {
return td::Status::Error("Invalid rate");
}
p.max_span = obj->max_span_;
p.minimal_file_size = obj->minimal_file_size_;
p.maximal_file_size = obj->maximal_file_size_;
return p;
}
tl_object_ptr<ton_api::storage_daemon_provider_params> ProviderParams::tl() const {
return create_tl_object<ton_api::storage_daemon_provider_params>(
accept_new_contracts, rate_per_mb_day->to_dec_string(), max_span, minimal_file_size, maximal_file_size);
}
bool ProviderParams::to_builder(vm::CellBuilder& b) const {
return b.store_long_bool(accept_new_contracts, 1) && store_coins(b, rate_per_mb_day) &&
b.store_long_bool(max_span, 32) && b.store_long_bool(minimal_file_size, 64) &&
b.store_long_bool(maximal_file_size, 64);
}
StorageProvider::StorageProvider(ContractAddress account_address, std::string db_root,
td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client,
td::actor::ActorId<StorageManager> storage_manager,
td::actor::ActorId<keyring::Keyring> keyring)
: main_address_(account_address)
, db_root_(std::move(db_root))
, tonlib_client_(std::move(tonlib_client))
, storage_manager_(std::move(storage_manager))
, keyring_(std::move(keyring)) {
}
void StorageProvider::start_up() {
LOG(INFO) << "Initing storage provider, account address: " << main_address_.to_string();
td::mkdir(db_root_).ensure();
db_ = std::make_unique<td::RocksDb>(td::RocksDb::open(db_root_).move_as_ok());
auto r_state = db::db_get<ton_api::storage_provider_db_state>(
*db_, create_hash_tl_object<ton_api::storage_provider_db_key_state>(), true);
r_state.ensure();
auto state = r_state.move_as_ok();
if (state) {
last_processed_lt_ = state->last_processed_lt_;
LOG(INFO) << "Loaded storage provider state";
LOG(INFO) << "Last processed lt: " << last_processed_lt_;
}
class Callback : public FabricContractWrapper::Callback {
public:
explicit Callback(td::actor::ActorId<StorageProvider> id) : id_(std::move(id)) {
}
void on_transaction(tl_object_ptr<tonlib_api::raw_transaction> transaction) override {
td::actor::send_closure(id_, &StorageProvider::process_transaction, std::move(transaction));
}
private:
td::actor::ActorId<StorageProvider> id_;
};
contract_wrapper_ =
td::actor::create_actor<FabricContractWrapper>("ContractWrapper", main_address_, tonlib_client_, keyring_,
td::make_unique<Callback>(actor_id(this)), last_processed_lt_);
auto r_config = db::db_get<ton_api::storage_daemon_providerConfig>(
*db_, create_hash_tl_object<ton_api::storage_provider_db_key_providerConfig>(), true);
r_config.ensure();
auto config_obj = r_config.move_as_ok();
if (config_obj) {
LOG(INFO) << "Loaded config from db";
config_ = Config(config_obj);
} else {
LOG(INFO) << "Using default config";
db_store_config();
}
LOG(INFO) << "Config: max_contracts=" << config_.max_contracts << ", max_total_size=" << config_.max_total_size;
auto r_contract_list = db::db_get<ton_api::storage_provider_db_contractList>(
*db_, create_hash_tl_object<ton_api::storage_provider_db_key_contractList>(), true);
r_contract_list.ensure();
auto contract_list = r_contract_list.move_as_ok();
if (contract_list) {
LOG(INFO) << "Loading " << contract_list->contracts_.size() << " contracts from db";
for (auto& c : contract_list->contracts_) {
ContractAddress address(c->wc_, c->addr_);
if (contracts_.count(address)) {
LOG(ERROR) << "Duplicate contract in db: " << address.to_string();
continue;
}
auto r_contract = db::db_get<ton_api::storage_provider_db_storageContract>(
*db_, create_hash_tl_object<ton_api::storage_provider_db_key_storageContract>(address.wc, address.addr),
true);
r_contract.ensure();
auto db_contract = r_contract.move_as_ok();
if (!db_contract) {
LOG(ERROR) << "Missing contract in db: " << address.to_string();
continue;
}
StorageContract& contract = contracts_[address];
contract.torrent_hash = db_contract->torrent_hash_;
contract.microchunk_hash = db_contract->microchunk_hash_;
contract.created_time = db_contract->created_time_;
contract.state = (StorageContract::State)db_contract->state_;
contract.file_size = db_contract->file_size_;
contract.max_span = db_contract->max_span_;
contract.rate = td::string_to_int256(db_contract->rate_);
contracts_total_size_ += contract.file_size;
auto r_tree = db::db_get<ton_api::storage_provider_db_microchunkTree>(
*db_, create_hash_tl_object<ton_api::storage_provider_db_key_microchunkTree>(address.wc, address.addr), true);
r_tree.ensure();
auto tree = r_tree.move_as_ok();
if (tree) {
contract.microchunk_tree = std::make_shared<MicrochunkTree>(vm::std_boc_deserialize(tree->data_).move_as_ok());
}
LOG(INFO) << "Loaded contract from db: " << address.to_string() << ", torrent=" << contract.torrent_hash.to_hex()
<< ", state=" << contract.state;
}
}
for (auto& p : contracts_) {
const ContractAddress& address = p.first;
StorageContract& contract = p.second;
switch (contract.state) {
case StorageContract::st_downloading:
init_new_storage_contract(address, contract);
break;
case StorageContract::st_downloaded:
check_contract_active(address);
break;
case StorageContract::st_active:
contract.check_next_proof_at = td::Timestamp::now();
break;
case StorageContract::st_closing:
check_storage_contract_deleted(address);
break;
default:
LOG(FATAL) << "Invalid contract state in db";
}
}
LOG(INFO) << "Loaded contracts from db";
alarm();
}
void StorageProvider::get_params(td::Promise<ProviderParams> promise) {
return get_provider_params(tonlib_client_, main_address_, std::move(promise));
}
void StorageProvider::get_provider_params(td::actor::ActorId<tonlib::TonlibClientWrapper> client,
ContractAddress address, td::Promise<ProviderParams> promise) {
run_get_method(
address, client, "get_storage_params", std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>>(),
promise.wrap([](std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>> stack) -> td::Result<ProviderParams> {
if (stack.size() != 5) {
return td::Status::Error(PSTRING() << "Method returned " << stack.size() << " values, 5 expected");
}
TRY_RESULT_PREFIX(accept_new_contracts, entry_to_int<int>(stack[0]), "Invalid accept_new_contracts: ");
TRY_RESULT_PREFIX(rate_per_mb_day, entry_to_int<td::RefInt256>(stack[1]), "Invalid rate_per_mb_day: ");
TRY_RESULT_PREFIX(max_span, entry_to_int<td::uint32>(stack[2]), "Invalid max_span: ");
TRY_RESULT_PREFIX(minimal_file_size, entry_to_int<td::uint64>(stack[3]), "Invalid minimal_file_size: ");
TRY_RESULT_PREFIX(maximal_file_size, entry_to_int<td::uint64>(stack[4]), "Invalid maximal_file_size: ");
ProviderParams params;
params.accept_new_contracts = accept_new_contracts;
params.rate_per_mb_day = rate_per_mb_day;
params.max_span = max_span;
params.minimal_file_size = minimal_file_size;
params.maximal_file_size = maximal_file_size;
return params;
}));
}
void StorageProvider::set_params(ProviderParams params, td::Promise<td::Unit> promise) {
vm::CellBuilder b;
b.store_long(0x54cbf19b, 32); // const op::update_storage_params = 0x54cbf19b;
b.store_long(0, 64); // query_id
if (!params.to_builder(b)) {
promise.set_error(td::Status::Error("Failed to store params to builder"));
return;
}
LOG(INFO) << "Sending external message to update provider parameters: " << params.accept_new_contracts << ", "
<< params.max_span << ", " << params.rate_per_mb_day << ", " << params.minimal_file_size << ", "
<< params.maximal_file_size;
td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, main_address_,
td::make_refint(100'000'000), b.as_cellslice(), std::move(promise));
}
void StorageProvider::db_store_state() {
LOG(DEBUG) << "db_store_state last_lt=" << last_processed_lt_;
db_->begin_transaction().ensure();
db_->set(create_hash_tl_object<ton_api::storage_provider_db_key_state>().as_slice(),
create_serialize_tl_object<ton_api::storage_provider_db_state>(last_processed_lt_))
.ensure();
db_->commit_transaction().ensure();
}
void StorageProvider::db_store_config() {
LOG(DEBUG) << "db_store_config";
db_->begin_transaction().ensure();
db_->set(create_hash_tl_object<ton_api::storage_provider_db_key_providerConfig>().as_slice(),
serialize_tl_object(config_.tl(), true))
.ensure();
db_->commit_transaction().ensure();
}
void StorageProvider::alarm() {
for (auto& p : contracts_) {
if (p.second.check_next_proof_at && p.second.check_next_proof_at.is_in_past()) {
p.second.check_next_proof_at = td::Timestamp::never();
check_next_proof(p.first, p.second);
}
alarm_timestamp().relax(p.second.check_next_proof_at);
}
}
void StorageProvider::process_transaction(tl_object_ptr<tonlib_api::raw_transaction> transaction) {
std::string new_contract_address;
for (auto& message : transaction->out_msgs_) {
auto data = dynamic_cast<tonlib_api::msg_dataRaw*>(message->msg_data_.get());
if (data == nullptr) {
continue;
}
auto r_body = vm::std_boc_deserialize(data->body_);
if (r_body.is_error()) {
LOG(ERROR) << "Invalid message body in tonlib response: " << r_body.move_as_error();
continue;
}
td::Ref<vm::Cell> body = r_body.move_as_ok();
vm::CellSlice cs = vm::load_cell_slice(body);
// const op::offer_storage_contract = 0x107c49ef;
if (cs.size() >= 32 && cs.prefetch_long(32) == 0x107c49ef) {
new_contract_address = message->destination_->account_address_;
}
}
if (!new_contract_address.empty()) {
auto P = td::PromiseCreator::lambda(
[SelfId = actor_id(this), lt = (td::uint64)transaction->transaction_id_->lt_](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(ERROR) << "Error during processing new storage contract, skipping: " << R.move_as_error();
}
});
on_new_storage_contract(ContractAddress::parse(new_contract_address).move_as_ok(), std::move(P));
}
last_processed_lt_ = transaction->transaction_id_->lt_;
db_store_state();
}
void StorageProvider::on_new_storage_contract(ContractAddress address, td::Promise<td::Unit> promise, int max_retries) {
LOG(INFO) << "Processing new storage contract: " << address.to_string();
get_storage_contract_data(
address, tonlib_client_,
[SelfId = actor_id(this), address, promise = std::move(promise),
max_retries](td::Result<StorageContractData> R) mutable {
if (R.is_error()) {
if (max_retries > 0) {
LOG(WARNING) << "Processing new storage contract: " << R.move_as_error() << ", retrying";
delay_action(
[SelfId = std::move(SelfId), promise = std::move(promise), address = std::move(address),
max_retries]() mutable {
td::actor::send_closure(SelfId, &StorageProvider::on_new_storage_contract, std::move(address),
std::move(promise), max_retries - 1);
},
td::Timestamp::in(5.0));
} else {
promise.set_error(R.move_as_error());
}
return;
}
td::actor::send_closure(SelfId, &StorageProvider::on_new_storage_contract_cont, address, R.move_as_ok(),
std::move(promise));
});
}
void StorageProvider::on_new_storage_contract_cont(ContractAddress address, StorageContractData data,
td::Promise<td::Unit> promise) {
auto it = contracts_.emplace(address, StorageContract());
if (!it.second) {
promise.set_error(td::Status::Error(PSTRING() << "Storage contract already registered: " << address.to_string()));
return;
}
LOG(INFO) << "New storage contract " << address.to_string() << ", torrent hash: " << data.torrent_hash.to_hex();
LOG(DEBUG) << "Stoage contract data: microchunk_hash=" << data.microchunk_hash << ", balance=" << data.balance
<< ", file_size=" << data.file_size << ", next_proof=" << data.next_proof
<< ", rate=" << data.rate_per_mb_day << ", max_span=" << data.max_span;
StorageContract& contract = it.first->second;
contract.torrent_hash = data.torrent_hash;
contract.microchunk_hash = data.microchunk_hash;
contract.state = StorageContract::st_downloading;
contract.created_time = (td::uint32)td::Clocks::system();
contract.file_size = data.file_size;
contract.max_span = data.max_span;
contract.rate = data.rate_per_mb_day;
contracts_total_size_ += contract.file_size;
promise.set_result(td::Unit());
if (contracts_.size() <= config_.max_contracts && contracts_total_size_ <= config_.max_total_size) {
db_update_storage_contract(address, true);
init_new_storage_contract(address, contract);
} else {
if (contracts_.size() > config_.max_contracts) {
LOG(WARNING) << "Cannot add new storage contract: too many contracts (limit = " << config_.max_contracts << ")";
} else {
LOG(WARNING) << "Cannot add new storage contract: total size exceeded (limit = "
<< td::format::as_size(config_.max_total_size) << ")";
}
contract.state = StorageContract::st_closing;
db_update_storage_contract(address, true);
do_close_storage_contract(address);
}
}
void StorageProvider::db_update_storage_contract(const ContractAddress& address, bool update_list) {
LOG(DEBUG) << "db_update_storage_contract " << address.to_string() << " " << update_list;
db_->begin_transaction().ensure();
if (update_list) {
std::vector<tl_object_ptr<ton_api::storage_provider_db_contractAddress>> list;
for (const auto& t : contracts_) {
list.push_back(create_tl_object<ton_api::storage_provider_db_contractAddress>(t.first.wc, t.first.addr));
}
db_->set(create_hash_tl_object<ton_api::storage_provider_db_key_contractList>().as_slice(),
create_serialize_tl_object<ton_api::storage_provider_db_contractList>(std::move(list)))
.ensure();
}
auto key = create_hash_tl_object<ton_api::storage_provider_db_key_storageContract>(address.wc, address.addr);
auto it = contracts_.find(address);
if (it == contracts_.end()) {
db_->erase(key.as_slice()).ensure();
} else {
const StorageContract& contract = it->second;
db_->set(key.as_slice(),
create_serialize_tl_object<ton_api::storage_provider_db_storageContract>(
contract.torrent_hash, contract.microchunk_hash, contract.created_time, (int)contract.state,
contract.file_size, contract.rate->to_dec_string(), contract.max_span));
}
db_->commit_transaction().ensure();
}
void StorageProvider::db_update_microchunk_tree(const ContractAddress& address) {
LOG(DEBUG) << "db_update_microchunk_tree " << address.to_string();
db_->begin_transaction().ensure();
auto key = create_hash_tl_object<ton_api::storage_provider_db_key_microchunkTree>(address.wc, address.addr);
auto it = contracts_.find(address);
if (it == contracts_.end() || it->second.microchunk_tree == nullptr) {
db_->erase(key.as_slice()).ensure();
} else {
db_->set(key.as_slice(), create_serialize_tl_object<ton_api::storage_provider_db_microchunkTree>(
vm::std_boc_serialize(it->second.microchunk_tree->get_root()).move_as_ok()));
}
db_->commit_transaction().ensure();
}
void StorageProvider::init_new_storage_contract(ContractAddress address, StorageContract& contract) {
CHECK(contract.state == StorageContract::st_downloading);
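// Steps: (1) add the torrent by hash ("already exists" is not an error here), (2) enable the download,
// (3) once complete, build the microchunk tree and verify its root against the hash from the contract.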
td::actor::send_closure(storage_manager_, &StorageManager::add_torrent_by_hash, contract.torrent_hash, "", false,
[](td::Result<td::Unit> R) {
// Ignore errors: error can mean that the torrent already exists, other errors will be caught later
if (R.is_error()) {
LOG(DEBUG) << "Add torrent: " << R.move_as_error();
} else {
LOG(DEBUG) << "Add torrent: OK";
}
});
td::actor::send_closure(storage_manager_, &StorageManager::set_active_download, contract.torrent_hash, true,
[SelfId = actor_id(this), address](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to init storage contract: " << R.move_as_error();
td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address);
return;
}
LOG(DEBUG) << "Set active download: OK";
});
td::actor::send_closure(
storage_manager_, &StorageManager::wait_for_completion, contract.torrent_hash,
[SelfId = actor_id(this), address, hash = contract.torrent_hash, microchunk_hash = contract.microchunk_hash,
manager = storage_manager_](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(WARNING) << "Failed to download torrent " << hash.to_hex() << ": " << R.move_as_error();
td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address);
return;
}
LOG(DEBUG) << "Downloaded torrent " << hash;
td::actor::send_closure(
manager, &StorageManager::with_torrent, hash,
[SelfId, address, hash, microchunk_hash](td::Result<NodeActor::NodeState> R) {
auto r_microchunk_tree = [&]() -> td::Result<MicrochunkTree> {
TRY_RESULT(state, std::move(R));
Torrent& torrent = state.torrent;
if (!torrent.is_completed() || torrent.get_included_size() != torrent.get_info().file_size) {
return td::Status::Error("unknown error");
}
LOG(DEBUG) << "Building microchunk tree for " << hash;
TRY_RESULT(tree, MicrochunkTree::Builder::build_for_torrent(torrent));
if (tree.get_root_hash() != microchunk_hash) {
return td::Status::Error("microchunk tree hash mismatch");
}
return tree;
}();
if (r_microchunk_tree.is_error()) {
LOG(WARNING) << "Failed to download torrent " << hash.to_hex() << ": " << R.move_as_error();
td::actor::send_closure(SelfId, &StorageProvider::do_close_storage_contract, address);
} else {
td::actor::send_closure(SelfId, &StorageProvider::downloaded_torrent, address,
r_microchunk_tree.move_as_ok());
}
});
});
}
void StorageProvider::downloaded_torrent(ContractAddress address, MicrochunkTree microchunk_tree) {
auto it = contracts_.find(address);
if (it == contracts_.end()) {
LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore";
return;
}
auto& contract = it->second;
LOG(INFO) << "Finished downloading torrent " << contract.torrent_hash.to_hex() << " for contract "
<< address.to_string();
contract.state = StorageContract::st_downloaded;
contract.microchunk_tree = std::make_shared<MicrochunkTree>(std::move(microchunk_tree));
db_update_microchunk_tree(address);
db_update_storage_contract(address, false);
check_contract_active(address);
}
void StorageProvider::check_contract_active(ContractAddress address, td::Timestamp retry_until,
td::Timestamp retry_false_until) {
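// retry_until bounds retries after query errors; retry_false_until bounds how long an inactive
// contract is polled before (re-)sending op::accept_storage_contract.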
get_storage_contract_data(address, tonlib_client_,
[=, SelfId = actor_id(this)](td::Result<StorageContractData> R) mutable {
if (R.is_error()) {
LOG(WARNING) << "Failed to check that contract is active: " << R.move_as_error();
if (retry_until && !retry_until.is_in_past()) {
delay_action(
[=]() {
td::actor::send_closure(SelfId, &StorageProvider::check_contract_active,
address, retry_until, retry_false_until);
},
td::Timestamp::in(5.0));
}
return;
}
if (R.ok().active) {
td::actor::send_closure(SelfId, &StorageProvider::activated_storage_contract, address);
} else if (retry_false_until && !retry_false_until.is_in_past()) {
delay_action(
[=]() {
td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, address,
retry_until, retry_false_until);
},
td::Timestamp::in(5.0));
} else {
td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address);
}
});
}
void StorageProvider::activate_contract_cont(ContractAddress address) {
vm::CellBuilder b;
b.store_long(0x7a361688, 32); // const op::accept_storage_contract = 0x7a361688;
b.store_long(0, 64); // query_id
LOG(DEBUG) << "Sending op::accept_storage_contract to " << address.to_string();
td::actor::send_closure(
contract_wrapper_, &FabricContractWrapper::send_internal_message, address, td::make_refint(100'000'000),
b.as_cellslice(), [SelfId = actor_id(this), address](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to send activate message, retrying later: " << R.move_as_error();
delay_action([=]() { td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); },
td::Timestamp::in(10.0));
return;
}
td::actor::send_closure(SelfId, &StorageProvider::check_contract_active, address, td::Timestamp::in(60.0),
td::Timestamp::in(40.0));
});
}
void StorageProvider::activated_storage_contract(ContractAddress address) {
auto it = contracts_.find(address);
if (it == contracts_.end()) {
LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore";
return;
}
LOG(INFO) << "Storage contract " << address.to_string() << " is active";
auto& contract = it->second;
contract.state = StorageContract::st_active;
db_update_storage_contract(address, false);
alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(1.0));
}
void StorageProvider::do_close_storage_contract(ContractAddress address) {
auto it = contracts_.find(address);
if (it == contracts_.end()) {
LOG(WARNING) << "Contract " << address.to_string() << " does not exist anymore";
return;
}
LOG(INFO) << "Closing storage contract " << address.to_string();
auto& contract = it->second;
contract.state = StorageContract::st_closing;
db_update_storage_contract(address, false);
check_storage_contract_deleted(address);
}
void StorageProvider::send_close_storage_contract(ContractAddress address) {
vm::CellBuilder b;
b.store_long(0x79f937ea, 32); // const op::close_contract = 0x79f937ea;
b.store_long(0, 64); // query_id
LOG(DEBUG) << "Sending op::close_contract to " << address.to_string();
td::actor::send_closure(
contract_wrapper_, &FabricContractWrapper::send_internal_message, address, td::make_refint(100'000'000),
b.as_cellslice(), [SelfId = actor_id(this), address](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to send close message, retrying later: " << R.move_as_error();
delay_action([=]() { td::actor::send_closure(SelfId, &StorageProvider::activate_contract_cont, address); },
td::Timestamp::in(10.0));
return;
}
td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address,
td::Timestamp::in(40.0));
});
}
void StorageProvider::check_storage_contract_deleted(ContractAddress address, td::Timestamp retry_false_until) {
check_contract_exists(address, tonlib_client_, [=, SelfId = actor_id(this)](td::Result<bool> R) {
if (R.is_error()) {
delay_action(
[=]() {
td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address,
retry_false_until);
},
td::Timestamp::in(10.0));
return;
}
if (!R.move_as_ok()) {
td::actor::send_closure(SelfId, &StorageProvider::storage_contract_deleted, address);
} else if (retry_false_until && !retry_false_until.is_in_past()) {
delay_action(
[=]() {
td::actor::send_closure(SelfId, &StorageProvider::check_storage_contract_deleted, address,
retry_false_until);
},
td::Timestamp::in(5.0));
} else {
td::actor::send_closure(SelfId, &StorageProvider::send_close_storage_contract, address);
}
});
}
void StorageProvider::storage_contract_deleted(ContractAddress address) {
auto it = contracts_.find(address);
if (it == contracts_.end()) {
return;
}
LOG(INFO) << "Storage contract " << address.to_string() << " was deleted";
td::Bits256 hash = it->second.torrent_hash;
contracts_total_size_ -= it->second.file_size;
contracts_.erase(it);
bool delete_torrent = true;
for (const auto& p : contracts_) {
if (p.second.torrent_hash == hash) {
delete_torrent = false;
break;
}
}
if (delete_torrent) {
LOG(INFO) << "Deleting torrent " << hash.to_hex();
td::actor::send_closure(storage_manager_, &StorageManager::remove_torrent, hash, true,
[](td::Result<td::Unit> R) {});
}
db_update_storage_contract(address, true);
}
void StorageProvider::check_next_proof(ContractAddress address, StorageContract& contract) {
if (contract.state != StorageContract::st_active) {
return;
}
CHECK(contract.microchunk_tree != nullptr);
get_storage_contract_data(
address, tonlib_client_, [SelfId = actor_id(this), address](td::Result<StorageContractData> R) {
td::actor::send_closure(SelfId, &StorageProvider::got_next_proof_info, address, std::move(R));
});
}
void StorageProvider::got_next_proof_info(ContractAddress address, td::Result<StorageContractData> R) {
auto it = contracts_.find(address);
if (it == contracts_.end() || it->second.state != StorageContract::st_active) {
return;
}
auto& contract = it->second;
if (R.is_error()) {
LOG(ERROR) << "get_next_proof_info for " << address.to_string() << ": " << R.move_as_error();
check_contract_exists(address, tonlib_client_, [SelfId = actor_id(this), address](td::Result<bool> R) {
td::actor::send_closure(SelfId, &StorageProvider::got_contract_exists, address, std::move(R));
});
return;
}
auto data = R.move_as_ok();
if (data.balance->sgn() == 0) {
LOG(INFO) << "Balance of contract " << address.to_string() << " is zero, closing";
do_close_storage_contract(address);
return;
}
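// Schedule the next proof once half of max_span has passed since the last accepted proof,
// leaving a margin before the paid span is capped at max_span.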
td::uint32 send_at = data.last_proof_time + data.max_span / 2, now = (td::uint32)td::Clocks::system();
if (now < send_at) {
LOG(DEBUG) << "Will send proof in " << send_at - now << "s (last_proof_time=" << data.last_proof_time
<< ", max_span=" << data.max_span << ")";
alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(send_at - now + 2));
return;
}
LOG(INFO) << "Sending proof for " << address.to_string() << ": next_proof=" << data.next_proof
<< ", max_span=" << data.max_span << ", last_proof_time=" << data.last_proof_time << " ("
<< now - data.last_proof_time << "s ago)";
td::actor::send_closure(
storage_manager_, &StorageManager::with_torrent, contract.torrent_hash,
[=, SelfId = actor_id(this), tree = contract.microchunk_tree](td::Result<NodeActor::NodeState> R) {
if (R.is_error()) {
LOG(ERROR) << "Missing torrent for " << address.to_string();
return;
}
auto state = R.move_as_ok();
td::uint64 l = data.next_proof / MicrochunkTree::MICROCHUNK_SIZE * MicrochunkTree::MICROCHUNK_SIZE;
td::uint64 r = l + MicrochunkTree::MICROCHUNK_SIZE;
auto proof = tree->get_proof(l, r, state.torrent);
td::actor::send_closure(SelfId, &StorageProvider::got_next_proof, address, std::move(proof));
});
}
void StorageProvider::got_contract_exists(ContractAddress address, td::Result<bool> R) {
auto it = contracts_.find(address);
if (it == contracts_.end() || it->second.state != StorageContract::st_active) {
return;
}
auto& contract = it->second;
if (R.is_error()) {
LOG(ERROR) << "Check contract exists for " << address.to_string() << ": " << R.move_as_error();
alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(10.0));
return;
}
if (R.ok()) {
alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(10.0));
return;
}
storage_contract_deleted(address);
}
void StorageProvider::got_next_proof(ContractAddress address, td::Result<td::Ref<vm::Cell>> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to build proof: " << R.move_as_error();
return;
}
LOG(INFO) << "Got proof, sending";
vm::CellBuilder b;
b.store_long(0x419d5d4d, 32); // const op::proof_storage = 0x419d5d4d;
b.store_long(0, 64); // query_id
b.store_ref(R.move_as_ok());
td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, address,
td::make_refint(100'000'000), b.as_cellslice(),
[SelfId = actor_id(this), address](td::Result<td::Unit> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to send proof message: " << R.move_as_error();
} else {
LOG(DEBUG) << "Proof for " << address.to_string() << " was sent";
}
td::actor::send_closure(SelfId, &StorageProvider::sent_next_proof, address);
});
}
void StorageProvider::sent_next_proof(ContractAddress address) {
auto it = contracts_.find(address);
if (it == contracts_.end() || it->second.state != StorageContract::st_active) {
return;
}
auto& contract = it->second;
alarm_timestamp().relax(contract.check_next_proof_at = td::Timestamp::in(30.0));
}
void StorageProvider::get_provider_info(bool with_balances, bool with_contracts,
td::Promise<tl_object_ptr<ton_api::storage_daemon_providerInfo>> promise) {
auto result = std::make_shared<ton_api::storage_daemon_providerInfo>();
td::MultiPromise mp;
auto ig = mp.init_guard();
ig.add_promise(promise.wrap(
[result](td::Unit) { return create_tl_object<ton_api::storage_daemon_providerInfo>(std::move(*result)); }));
result->address_ = main_address_.to_string();
result->config_ = config_.tl();
result->contracts_count_ = (int)contracts_.size();
result->contracts_total_size_ = contracts_total_size_;
if (with_balances) {
get_contract_balance(main_address_, tonlib_client_, ig.get_promise().wrap([result](td::RefInt256 balance) {
result->balance_ = balance->to_dec_string();
return td::Unit();
}));
} else {
result->balance_ = "-1";
}
if (with_contracts) {
for (const auto& p : contracts_) {
auto obj = create_tl_object<ton_api::storage_daemon_contractInfo>();
const StorageContract& contract = p.second;
obj->address_ = p.first.to_string();
obj->state_ = (int)contract.state;
obj->torrent_ = contract.torrent_hash;
obj->created_time_ = contract.created_time;
obj->rate_ = contract.rate->to_dec_string();
obj->max_span_ = contract.max_span;
obj->file_size_ = contract.file_size;
obj->downloaded_size_ = obj->file_size_;
obj->client_balance_ = "-1";
obj->contract_balance_ = "-1";
result->contracts_.push_back(std::move(obj));
}
size_t i = 0;
for (const auto& p : contracts_) {
const StorageContract& contract = p.second;
if (contract.state == StorageContract::st_downloading) {
td::actor::send_closure(storage_manager_, &StorageManager::with_torrent, contract.torrent_hash,
[i, result, promise = ig.get_promise()](td::Result<NodeActor::NodeState> R) mutable {
if (R.is_error()) {
result->contracts_[i]->downloaded_size_ = 0;
} else {
auto state = R.move_as_ok();
result->contracts_[i]->downloaded_size_ = state.torrent.get_included_ready_size();
}
promise.set_result(td::Unit());
});
}
if (with_balances) {
get_contract_balance(p.first, tonlib_client_,
[i, result, promise = ig.get_promise()](td::Result<td::RefInt256> R) mutable {
if (R.is_ok()) {
result->contracts_[i]->contract_balance_ = R.ok()->to_dec_string();
}
promise.set_result(td::Unit());
});
get_storage_contract_data(p.first, tonlib_client_,
[i, result, promise = ig.get_promise()](td::Result<StorageContractData> R) mutable {
auto S = [&]() -> td::Status {
TRY_RESULT(data, std::move(R));
result->contracts_[i]->client_balance_ = data.balance->to_dec_string();
return td::Status::OK();
}();
promise.set_result(td::Unit());
});
}
i += 1;
}
}
}
void StorageProvider::set_provider_config(Config config, td::Promise<td::Unit> promise) {
config_ = config;
LOG(INFO) << "Changing provider config: max_contracts=" << config_.max_contracts
<< ", max_total_size=" << config_.max_total_size;
db_store_config();
promise.set_result(td::Unit());
}
void StorageProvider::withdraw(ContractAddress address, td::Promise<td::Unit> promise) {
auto it = contracts_.find(address);
if (it == contracts_.end()) {
promise.set_error(td::Status::Error("No such storage contract"));
return;
}
if (it->second.state != StorageContract::st_active) {
promise.set_error(td::Status::Error("Storage contract is not active"));
return;
}
vm::CellBuilder b;
b.store_long(0x46ed2e94, 32); // const op::withdraw = 0x46ed2e94;
b.store_long(0, 64); // query_id
LOG(INFO) << "Sending op::withdraw to storage contract " << address.to_string();
td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, address,
td::make_refint(100'000'000), b.as_cellslice(), std::move(promise));
}
void StorageProvider::send_coins(ContractAddress dest, td::RefInt256 amount, std::string message,
td::Promise<td::Unit> promise) {
if (amount->sgn() < 0) {
promise.set_error(td::Status::Error("Amount is negative"));
return;
}
vm::CellBuilder b;
if (!message.empty()) {
b.store_long(0, 32);
if (b.remaining_bits() < message.size() * 8) {
promise.set_error(td::Status::Error("Message is too long (max 122 bytes)"));
return;
}
b.store_bytes(td::Slice(message));
}
LOG(INFO) << "Sending " << amount << " nanoTON to " << dest.to_string();
td::actor::send_closure(contract_wrapper_, &FabricContractWrapper::send_internal_message, dest, amount,
b.finalize_novm(), std::move(promise));
}
void StorageProvider::close_storage_contract(ContractAddress address, td::Promise<td::Unit> promise) {
if (!contracts_.count(address)) {
promise.set_error(td::Status::Error("No such storage contract"));
return;
}
do_close_storage_contract(address);
promise.set_result(td::Unit());
}
StorageProvider::Config::Config(const tl_object_ptr<ton_api::storage_daemon_providerConfig>& obj)
: max_contracts(obj->max_contracts_), max_total_size(obj->max_total_size_) {
}
tl_object_ptr<ton_api::storage_daemon_providerConfig> StorageProvider::Config::tl() const {
return create_tl_object<ton_api::storage_daemon_providerConfig>(max_contracts, max_total_size);
}

View file

@ -0,0 +1,127 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "td/actor/actor.h"
#include "storage/db.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"
#include "StorageManager.h"
#include "keyring/keyring.h"
#include "smc-util.h"
#include "storage/MicrochunkTree.h"
using namespace ton;
struct ProviderParams {
bool accept_new_contracts = false;
td::RefInt256 rate_per_mb_day = td::zero_refint();
td::uint32 max_span = 0;
td::uint64 minimal_file_size = 0;
td::uint64 maximal_file_size = 0;
static td::Result<ProviderParams> create(const tl_object_ptr<ton_api::storage_daemon_provider_params>& obj);
tl_object_ptr<ton_api::storage_daemon_provider_params> tl() const;
bool to_builder(vm::CellBuilder& b) const;
};
class StorageProvider : public td::actor::Actor {
public:
struct Config {
td::uint32 max_contracts = 1000;
td::uint64 max_total_size = 128LL << 30;
Config() = default;
explicit Config(const tl_object_ptr<ton_api::storage_daemon_providerConfig>& obj);
tl_object_ptr<ton_api::storage_daemon_providerConfig> tl() const;
};
StorageProvider(ContractAddress address, std::string db_root,
td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client,
td::actor::ActorId<StorageManager> storage_manager, td::actor::ActorId<keyring::Keyring> keyring);
void start_up() override;
void alarm() override;
void get_params(td::Promise<ProviderParams> promise);
static void get_provider_params(td::actor::ActorId<tonlib::TonlibClientWrapper>, ContractAddress address,
td::Promise<ProviderParams> promise);
void set_params(ProviderParams params, td::Promise<td::Unit> promise);
void get_provider_info(bool with_balances, bool with_contracts,
td::Promise<tl_object_ptr<ton_api::storage_daemon_providerInfo>> promise);
void set_provider_config(Config config, td::Promise<td::Unit> promise);
void withdraw(ContractAddress address, td::Promise<td::Unit> promise);
void send_coins(ContractAddress dest, td::RefInt256 amount, std::string message, td::Promise<td::Unit> promise);
void close_storage_contract(ContractAddress address, td::Promise<td::Unit> promise);
private:
ContractAddress main_address_;
std::string db_root_;
td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client_;
td::actor::ActorId<StorageManager> storage_manager_;
td::actor::ActorId<keyring::Keyring> keyring_;
td::Promise<td::Unit> init_promise_;
Config config_;
std::unique_ptr<td::KeyValue> db_;
td::actor::ActorOwn<FabricContractWrapper> contract_wrapper_;
td::uint64 last_processed_lt_ = 0;
struct StorageContract {
enum State { st_downloading = 0, st_downloaded = 1, st_active = 2, st_closing = 3 };
td::Bits256 torrent_hash;
td::Bits256 microchunk_hash;
td::uint32 created_time;
State state;
td::uint64 file_size = 0;
td::uint32 max_span = 0;
td::RefInt256 rate = td::zero_refint();
// TODO: Compute and store only one tree for duplicating torrents
std::shared_ptr<MicrochunkTree> microchunk_tree;
td::Timestamp check_next_proof_at = td::Timestamp::never();
};
std::map<ContractAddress, StorageContract> contracts_;
td::uint64 contracts_total_size_ = 0;
void process_transaction(tl_object_ptr<tonlib_api::raw_transaction> transaction);
void db_store_state();
void db_store_config();
void db_update_storage_contract(const ContractAddress& address, bool update_list);
void db_update_microchunk_tree(const ContractAddress& address);
void on_new_storage_contract(ContractAddress address, td::Promise<td::Unit> promise, int max_retries = 10);
void on_new_storage_contract_cont(ContractAddress address, StorageContractData data, td::Promise<td::Unit> promise);
void init_new_storage_contract(ContractAddress address, StorageContract& contract);
void downloaded_torrent(ContractAddress address, MicrochunkTree microchunk_tree);
void check_contract_active(ContractAddress address, td::Timestamp retry_until = td::Timestamp::in(30.0),
td::Timestamp retry_false_until = td::Timestamp::never());
void activate_contract_cont(ContractAddress address);
void activated_storage_contract(ContractAddress address);
void do_close_storage_contract(ContractAddress address);
void check_storage_contract_deleted(ContractAddress address,
td::Timestamp retry_false_until = td::Timestamp::never());
void send_close_storage_contract(ContractAddress address);
void storage_contract_deleted(ContractAddress address);
void check_next_proof(ContractAddress address, StorageContract& contract);
void got_next_proof_info(ContractAddress address, td::Result<StorageContractData> R);
void got_contract_exists(ContractAddress address, td::Result<bool> R);
void got_next_proof(ContractAddress address, td::Result<td::Ref<vm::Cell>> R);
void sent_next_proof(ContractAddress address);
};

View file

@ -0,0 +1 @@
provider-code.h

View file

@ -0,0 +1,23 @@
# Storage Provider
A simple smart-contract system for concluding storage agreements.
- guarantees that the provider stores the file
- no storage, no payment
- no penalties: if the provider stops storing the file, the client can simply stop paying at any time
- no enforcement that the provider serves the file: the client can stop paying at any time if not satisfied
## Storage Agreements Fabric
The storage provider deploys a storage agreements fabric. Any client may ask the fabric to deploy a storage agreement contract.
The fabric provides the get-method `get_storage_params`, which returns (see the sketch after this list):
- `accept_new_contracts?` - whether the provider accepts new contracts
- `rate_per_mb_day` - price in nanoTON per megabyte per day
- `max_span` - maximal timespan between storage proofs that is paid; time beyond it earns nothing extra
- `minimal_file_size` - minimal file size accepted by the provider
- `maximal_file_size` - maximal file size accepted by the provider
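For illustration, a daemon-side caller can read these parameters through the helper declared in `StorageProvider.h` in this commit. A minimal sketch, assuming the storage-daemon build environment and a running `tonlib::TonlibClientWrapper` actor; `print_provider_params` and the include path are illustrative, not part of this commit:

```cpp
#include "StorageProvider.h"

void print_provider_params(td::actor::ActorId<tonlib::TonlibClientWrapper> tonlib_client,
                           ContractAddress provider_address) {
  StorageProvider::get_provider_params(
      tonlib_client, provider_address, [](td::Result<ProviderParams> R) {
        if (R.is_error()) {
          LOG(ERROR) << "get_provider_params failed: " << R.move_as_error();
          return;
        }
        auto params = R.move_as_ok();
        LOG(INFO) << "accept_new_contracts=" << params.accept_new_contracts
                  << " rate_per_mb_day=" << params.rate_per_mb_day->to_dec_string()
                  << " max_span=" << params.max_span << " file_size=[" << params.minimal_file_size
                  << ".." << params.maximal_file_size << "]";
      });
}
```

Passing a lambda where a `td::Promise<ProviderParams>` is expected works because `td::Promise` is constructible from a callable taking the corresponding `td::Result`, the same pattern used throughout `StorageProvider.cpp`.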
## Storage Agreement
The agreement contract keeps a client account and accepts deposits to it.
It also stores the Merkle root of the file and lets the provider withdraw money from the client account by presenting a Merkle proof of file storage.
The client can stop the agreement at any time.
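The reward per accepted proof follows the arithmetic of `op::proof_storage` in `storage-contract.fc` below: `bounty = file_size * rate_per_mb_day * min(now - last_proof_time, max_span) / (86400 * 2^20)`. A standalone C++ sketch of the same computation (function name and values are illustrative; the contract itself uses 257-bit `muldiv`):

```cpp
#include <cstdint>
#include <iostream>

// bounty = file_size * rate_per_mb_day * actual_span / (86400 * 1024 * 1024), truncated.
// unsigned __int128 (GCC/Clang) keeps the intermediate product exact, like the contract's muldiv.
std::uint64_t proof_bounty(std::uint64_t file_size, std::uint64_t rate_per_mb_day, std::uint32_t actual_span) {
  unsigned __int128 product = (unsigned __int128)file_size * rate_per_mb_day * actual_span;
  return (std::uint64_t)(product / (86400ULL * 1024 * 1024));
}

int main() {
  // 1 GiB file at 1'000'000 nanoTON per MiB per day, proof accepted after 12 hours:
  std::cout << proof_bounty(1ULL << 30, 1'000'000, 43200) << " nanoTON\n";  // 512000000 (0.512 TON)
}
```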

View file

@ -0,0 +1,4 @@
func -SPA -o storage-contract.fif ../../../crypto/smartcont/stdlib.fc storage-contract.fc && \
(echo "\"storage-contract.fif\" include boc>B \"storage-contract-code.boc\" B>file" | fift) && \
func -SPA -o storage-provider.fif ../../../crypto/smartcont/stdlib.fc storage-provider.fc && \
(echo "\"storage-provider.fif\" include boc>B \"storage-provider-code.boc\" B>file" | fift)

View file

@ -0,0 +1,23 @@
const op::offer_storage_contract = 0x107c49ef;
const op::close_contract = 0x79f937ea;
const op::contract_deployed = 0xbf7bd0c1;
const op::storage_contract_confirmed = 0xd4caedcd;
const op::reward_withdrawal = 0xa91baf56;
const op::storage_contract_terminated = 0xb6236d63;
const op::accept_storage_contract = 0x7a361688;
const op::withdraw = 0x46ed2e94;
const op::proof_storage = 0x419d5d4d;
const op::update_pubkey = 0x53f34cd6;
const op::update_storage_params = 0x54cbf19b;
const error::not_enough_money = 1001;
const error::unauthorized = 401;
const error::wrong_proof = 1002;
const error::contract_not_active = 1003;
const error::file_too_small = 1004;
const error::file_too_big = 1005;
const error::no_new_contracts = 1006;
const error::contract_already_active = 1007;
const error::no_microchunk_hash = 1008;
const error::provider_params_changed = 1009;

View file

@ -0,0 +1,40 @@
#include <fstream>
#include <iostream>
#include <vector>
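// Embeds a compiled contract .boc file into a C++ header as a byte array (STORAGE_PROVIDER_CODE).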
int main(int argc, char** argv) {
if (argc != 3) {
std::cerr << "Usage: generate-provider-code in.boc out.h\n";
return 1;
}
std::ifstream in(argv[1], std::ios_base::ate | std::ios_base::binary);
if (!in) {
  std::cerr << "Error: cannot open input\n";
  return 1;
}
size_t size = in.tellg();
in.seekg(0, std::ios::beg);
std::vector<char> buf(size);
if (!in.read(buf.data(), size)) {
std::cerr << "Error: cannot read input\n";
return 1;
}
in.close();
std::ofstream out(argv[2]);
out << "// Auto-generated by embed-provider-code\n";
out << "#pragma once\n";
out << "const unsigned char STORAGE_PROVIDER_CODE[" << size << "] = {\n ";
for (size_t i = 0; i < size; ++i) {
if (i != 0) {
out << ",";
if (i % 32 == 31) {
out << "\n ";
}
}
out << (int)(unsigned char)buf[i];
}
out << "\n};\n";
if (!out) {
std::cerr << "Error: cannot write output\n";
return 1;
}
out.close();
return 0;
}

View file

@ -0,0 +1,266 @@
#include "constants.fc";
const CHUNK_SIZE = 64;
const fee::receipt_value = 20000000;
const fee::storage = 10000000;
{-
storage#_ active:Bool
balance:Coins provider:MsgAddress
merkle_hash:uint256 file_size:uint64 next_proof_byte:uint64
rate_per_mb_day:Coins
max_span:uint32 last_proof_time:uint32
^[client:MsgAddress torrent_hash:uint256] = Storage;
-}
(slice, int) begin_parse_special(cell c) asm "x{D739} s,";
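;; begin_parse_special returns (slice, is_exotic?); unlike begin_parse it also accepts exotic cells
;; such as Merkle proofs. check_proof verifies that file_dict_proof is a Merkle proof cell for a
;; dictionary whose root hash equals merkle_hash and that the dictionary contains the chunk
;; covering byte_to_proof.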
int check_proof(int merkle_hash, int byte_to_proof, int file_size, cell file_dict_proof) {
(slice cs, int special) = file_dict_proof.begin_parse_special();
if (~ special) {
return false;
}
if (cs~load_uint(8) != 3) { ;; Merkle proof
return false;
}
if (cs~load_uint(256) != merkle_hash) {
return false;
}
cell file_dict = cs~load_ref();
int key_len = 0;
while ((CHUNK_SIZE << key_len) < file_size) {
key_len += 1;
}
(slice data, int found?) = file_dict.udict_get?(key_len, byte_to_proof / CHUNK_SIZE);
if(found?) {
return true;
}
return false;
}
() add_to_balance(int amount) impure inline_ref {
var ds = get_data().begin_parse();
var (active, balance, residue) = (ds~load_int(1), ds~load_grams(), ds);
balance += amount;
begin_cell()
.store_int(active, 1)
.store_coins(balance)
.store_slice(residue)
.end_cell().set_data();
}
(slice, int) get_client_data(ds) {
ds = ds.preload_ref().begin_parse();
return (ds~load_msg_addr(), ds~load_uint(256));
}
() recv_internal(int msg_value, cell in_msg_full, slice in_msg_body) impure {
slice cs = in_msg_full.begin_parse();
int flags = cs~load_uint(4);
if (flags & 1) { ;; ignore all bounced messages
return ();
}
slice sender_address = cs~load_msg_addr();
if (in_msg_body.slice_empty?()) {
return add_to_balance(msg_value);
}
int op = in_msg_body~load_uint(32);
if (op == 0) {
return add_to_balance(msg_value);
}
int query_id = in_msg_body~load_uint(64);
if(op == op::offer_storage_contract) {
add_to_balance(msg_value - 2 * fee::receipt_value);
var (client, torrent_hash) = get_client_data(get_data().begin_parse());
var msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(client)
.store_coins(fee::receipt_value)
.store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1)
.store_uint(op::contract_deployed, 32)
.store_uint(query_id, 64)
.store_uint(torrent_hash, 256)
.end_cell();
send_raw_message(msg, 0);
}
if (op == op::accept_storage_contract) {
var ds = get_data().begin_parse();
(int active, int balance, slice provider, slice rest) =
(ds~load_int(1), ds~load_coins(), ds~load_msg_addr(), ds);
throw_unless(error::contract_already_active, ~ active);
throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider));
begin_cell()
.store_int(true, 1)
.store_coins(balance)
.store_slice(provider)
.store_slice(rest)
.end_cell().set_data();
var (client, torrent_hash) = get_client_data(rest);
var msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(client)
.store_coins(fee::receipt_value)
.store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1)
.store_uint(op::storage_contract_confirmed, 32)
.store_uint(cur_lt(), 64)
.store_uint(torrent_hash, 256)
.end_cell();
send_raw_message(msg, 0);
}
if (op == op::close_contract) {
var ds = get_data().begin_parse();
(int active, int balance, slice provider, slice rest) =
(ds~load_int(1), ds~load_coins(), ds~load_msg_addr(), ds);
var (client, torrent_hash) = get_client_data(rest);
throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider) | equal_slice_bits(sender_address, client));
var client_msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(client)
.store_coins(balance)
.store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1)
.store_uint(op::storage_contract_terminated, 32)
.store_uint(cur_lt(), 64)
.store_uint(torrent_hash, 256)
.end_cell();
if(~ active) {
return send_raw_message(client_msg, 128 + 32);
}
send_raw_message(client_msg, 64);
var provider_msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(provider)
.store_coins(0)
.store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1)
.store_uint(op::storage_contract_terminated, 32)
.store_uint(cur_lt(), 64)
.store_uint(torrent_hash, 256)
.end_cell();
return send_raw_message(provider_msg, 128 + 32);
}
if (op == op::withdraw) {
var ds = get_data().begin_parse();
(int active, int balance, slice provider) = (ds~load_int(1), ds~load_coins(), ds~load_msg_addr());
throw_unless(error::contract_not_active, active);
throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider));
if(balance > 0) {
raw_reserve(balance + fee::storage, 2);
}
var msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(provider)
.store_coins(fee::receipt_value)
.store_uint(0, 1 + 4 + 4 + 64 + 32 + 1 + 1)
.store_uint(op::reward_withdrawal, 32)
.store_uint(query_id, 64)
.end_cell();
send_raw_message(msg, 128 + 32);
}
if (op == op::proof_storage) {
cell file_dict_proof = in_msg_body~load_ref();
var ds = get_data().begin_parse();
var (active,
balance,
provider,
merkle_hash,
file_size,
next_proof,
rate_per_mb_day,
max_span,
last_proof_time,
client_data) = (ds~load_int(1),
ds~load_coins(),
ds~load_msg_addr(),
ds~load_uint(256),
ds~load_uint(64),
ds~load_uint(64),
ds~load_coins(),
ds~load_uint(32),
ds~load_uint(32),
ds~load_ref());
throw_unless(error::contract_not_active, active);
throw_unless(error::unauthorized, equal_slice_bits(sender_address, provider));
throw_unless(error::wrong_proof, check_proof(merkle_hash, next_proof, file_size, file_dict_proof));
next_proof = rand(file_size);
int actual_span = min(now() - last_proof_time, max_span);
int bounty = muldiv(file_size * rate_per_mb_day, actual_span, 24 * 60 * 60 * 1024 * 1024);
balance = max(0, balance - bounty);
last_proof_time = now();
begin_cell()
.store_int(true, 1)
.store_coins(balance)
.store_slice(provider)
.store_uint(merkle_hash, 256)
.store_uint(file_size, 64)
.store_uint(next_proof, 64)
.store_coins(rate_per_mb_day)
.store_uint(max_span, 32)
.store_uint(last_proof_time, 32)
.store_ref(client_data)
.end_cell().set_data();
;; Send remaining balance back
cell msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(sender_address)
.store_uint(0, 4 + 1 + 4 + 4 + 64 + 32 + 1 + 1)
.end_cell();
send_raw_message(msg, 64 + 2);
}
}
_ get_storage_contract_data() method_id {
var ds = get_data().begin_parse();
var (active,
balance,
provider,
merkle_hash,
file_size,
next_proof,
rate_per_mb_day,
max_span,
last_proof_time,
rest) = (ds~load_int(1),
ds~load_coins(),
ds~load_msg_addr(),
ds~load_uint(256),
ds~load_uint(64),
ds~load_uint(64),
ds~load_coins(),
ds~load_uint(32),
ds~load_uint(32),
ds);
var (client, torrent_hash) = get_client_data(rest);
return (active, balance, provider, merkle_hash, file_size,
next_proof, rate_per_mb_day, max_span, last_proof_time,
client, torrent_hash);
}
_ get_torrent_hash() method_id {
var (active, balance, provider, merkle_hash, file_size,
next_proof, rate_per_mb_day, max_span, last_proof_time,
client, torrent_hash) = get_storage_contract_data();
return torrent_hash;
}
_ is_active() method_id {
return get_data().begin_parse().preload_int(1);
}
;; next_proof, last_proof_time, max_span
_ get_next_proof_info() method_id {
var (active, balance, provider, merkle_hash, file_size,
next_proof, rate_per_mb_day, max_span, last_proof_time,
client, torrent_hash) = get_storage_contract_data();
return (next_proof, last_proof_time, max_span);
}

View file

@ -0,0 +1,421 @@
"Asm.fif" include
// automatically generated from `../../../crypto/smartcont/stdlib.fc` `storage-contract.fc` incl:`constants.fc`
PROGRAM{
DECLPROC check_proof
DECLPROC add_to_balance
DECLPROC get_client_data
DECLPROC recv_internal
86593 DECLMETHOD get_storage_contract_data
71463 DECLMETHOD get_torrent_hash
122058 DECLMETHOD is_active
81490 DECLMETHOD get_next_proof_info
check_proof PROC:<{
// merkle_hash byte_to_proof file_size file_dict_proof
x{D739} s, // merkle_hash byte_to_proof file_size cs special
NOT // merkle_hash byte_to_proof file_size cs _7
IFJMP:<{ // merkle_hash byte_to_proof file_size cs
4 BLKDROP //
FALSE // _8
}> // merkle_hash byte_to_proof file_size cs
8 LDU // merkle_hash byte_to_proof file_size _9 cs
SWAP // merkle_hash byte_to_proof file_size cs _9
3 NEQINT // merkle_hash byte_to_proof file_size cs _13
IFJMP:<{ // merkle_hash byte_to_proof file_size cs
4 BLKDROP //
FALSE // _14
}> // merkle_hash byte_to_proof file_size cs
256 LDU // merkle_hash byte_to_proof file_size _15 cs
s0 s4 XCHG // cs byte_to_proof file_size _15 merkle_hash
NEQ // cs byte_to_proof file_size _18
IFJMP:<{ // cs byte_to_proof file_size
3 BLKDROP //
FALSE // _19
}> // cs byte_to_proof file_size
s0 s2 XCHG // file_size byte_to_proof cs
LDREF // file_size byte_to_proof _44 _43
DROP // file_size byte_to_proof file_dict
0 PUSHINT // file_size byte_to_proof file_dict key_len=0
WHILE:<{
64 PUSHINT // file_size byte_to_proof file_dict key_len _25=64
OVER // file_size byte_to_proof file_dict key_len _25=64 key_len
LSHIFT // file_size byte_to_proof file_dict key_len _26
s4 PUSH // file_size byte_to_proof file_dict key_len _26 file_size
LESS // file_size byte_to_proof file_dict key_len _27
}>DO<{ // file_size byte_to_proof file_dict key_len
INC // file_size byte_to_proof file_dict key_len
}> // file_size byte_to_proof file_dict key_len
s3 POP // key_len byte_to_proof file_dict
SWAP // key_len file_dict byte_to_proof
6 RSHIFT# // key_len file_dict _33
s0 s2 XCHG // _33 file_dict key_len
DICTUGET
NULLSWAPIFNOT // _45 _46
NIP // found?
IFJMP:<{ //
TRUE // _35
}> //
FALSE // _36
}>
add_to_balance PROCREF:<{
// amount
c4 PUSH // amount _2
CTOS // amount ds
1 LDI // amount _7 ds
LDGRAMS // amount active balance residue
s0 s3 XCHG // residue active balance amount
ADD // residue active balance
SWAP
NEWC // residue balance active _13
1 STI // residue balance _15
SWAP // residue _15 balance
STGRAMS // residue _16
SWAP // _16 residue
STSLICER // _17
ENDC // _18
c4 POP
}>
get_client_data PROC:<{
// ds
PLDREF // _1
CTOS // ds
LDMSGADDR // _3 ds
256 LDU // _3 _11 _10
DROP // _3 _5
}>
recv_internal PROC:<{
// msg_value in_msg_full in_msg_body
SWAP // msg_value in_msg_body in_msg_full
CTOS // msg_value in_msg_body cs
4 LDU // msg_value in_msg_body flags cs
SWAP
1 PUSHINT // msg_value in_msg_body cs flags _9=1
AND // msg_value in_msg_body cs _10
IFJMP:<{ // msg_value in_msg_body cs
3 BLKDROP //
}> // msg_value in_msg_body cs
LDMSGADDR // msg_value in_msg_body _421 _420
DROP // msg_value in_msg_body sender_address
OVER // msg_value in_msg_body sender_address in_msg_body
SEMPTY // msg_value in_msg_body sender_address _14
IFJMP:<{ // msg_value in_msg_body sender_address
2DROP // msg_value
add_to_balance INLINECALLDICT
}> // msg_value in_msg_body sender_address
SWAP // msg_value sender_address in_msg_body
32 LDU // msg_value sender_address op in_msg_body
OVER // msg_value sender_address op in_msg_body op
0 EQINT // msg_value sender_address op in_msg_body _21
IFJMP:<{ // msg_value sender_address op in_msg_body
3 BLKDROP // msg_value
add_to_balance INLINECALLDICT
}> // msg_value sender_address op in_msg_body
64 LDU // msg_value sender_address op query_id in_msg_body
s2 PUSH
276580847 PUSHINT // msg_value sender_address op query_id in_msg_body op _27=276580847
EQUAL // msg_value sender_address op query_id in_msg_body _28
IF:<{ // msg_value sender_address op query_id in_msg_body
s0 s4 XCHG
40000000 PUSHINT // in_msg_body sender_address op query_id msg_value _31
SUB // in_msg_body sender_address op query_id _32
add_to_balance INLINECALLDICT
c4 PUSH // in_msg_body sender_address op query_id _36
CTOS // in_msg_body sender_address op query_id _37
get_client_data CALLDICT // in_msg_body sender_address op query_id client torrent_hash
3212562625 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625
0 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0
24 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _42=24
NEWC // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _42=24 _43
6 STU // in_msg_body sender_address op query_id client torrent_hash _40=3212562625 _41=0 _45
s0 s4 XCHG2 // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _45 client
STSLICER // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _46
20000000 PUSHINT // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _46 _47=20000000
STGRAMS // in_msg_body sender_address op query_id _41=0 torrent_hash _40=3212562625 _48
s1 s3 XCHG // in_msg_body sender_address op query_id _40=3212562625 torrent_hash _41=0 _48
107 STU // in_msg_body sender_address op query_id _40=3212562625 torrent_hash _62
s1 s2 XCHG // in_msg_body sender_address op query_id torrent_hash _40=3212562625 _62
32 STU // in_msg_body sender_address op query_id torrent_hash _64
s2 s(-1) PUXC // in_msg_body sender_address op query_id torrent_hash query_id _64
64 STU // in_msg_body sender_address op query_id torrent_hash _66
256 STU // in_msg_body sender_address op query_id _68
ENDC // in_msg_body sender_address op query_id msg
0 PUSHINT // in_msg_body sender_address op query_id msg _70=0
SENDRAWMSG
}>ELSE<{
s4 POP // in_msg_body sender_address op query_id
}>
OVER
2050365064 PUSHINT // in_msg_body sender_address op query_id op _72=2050365064
EQUAL // in_msg_body sender_address op query_id _73
IF:<{ // in_msg_body sender_address op query_id
c4 PUSH // in_msg_body sender_address op query_id _75
CTOS // in_msg_body sender_address op query_id ds
1 LDI // in_msg_body sender_address op query_id _81 ds
LDGRAMS // in_msg_body sender_address op query_id _81 _84 ds
LDMSGADDR // in_msg_body sender_address op query_id active balance provider rest
s0 s3 XCHG // in_msg_body sender_address op query_id rest balance provider active
NOT // in_msg_body sender_address op query_id rest balance provider _89
1007 THROWIFNOT
s5 s0 PUSH2 // in_msg_body sender_address op query_id rest balance provider sender_address provider
SDEQ // in_msg_body sender_address op query_id rest balance provider _92
401 THROWIFNOT
TRUE // in_msg_body sender_address op query_id rest balance provider _94
NEWC // in_msg_body sender_address op query_id rest balance provider _94 _95
1 STI // in_msg_body sender_address op query_id rest balance provider _97
ROT // in_msg_body sender_address op query_id rest provider _97 balance
STGRAMS // in_msg_body sender_address op query_id rest provider _98
SWAP // in_msg_body sender_address op query_id rest _98 provider
STSLICER // in_msg_body sender_address op query_id rest _99
OVER // in_msg_body sender_address op query_id rest _99 rest
STSLICER // in_msg_body sender_address op query_id rest _100
ENDC // in_msg_body sender_address op query_id rest _101
c4 POP
get_client_data CALLDICT // in_msg_body sender_address op query_id client torrent_hash
LTIME // in_msg_body sender_address op query_id client torrent_hash _107
3570068941 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941
0 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0
24 PUSHINT // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _110=24
NEWC // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _110=24 _111
6 STU // in_msg_body sender_address op query_id client torrent_hash _107 _108=3570068941 _109=0 _113
s0 s5 XCHG2 // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _113 client
STSLICER // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _114
20000000 PUSHINT // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _114 _115=20000000
STGRAMS // in_msg_body sender_address op query_id _109=0 torrent_hash _107 _108=3570068941 _116
s1 s4 XCHG // in_msg_body sender_address op query_id _108=3570068941 torrent_hash _107 _109=0 _116
107 STU // in_msg_body sender_address op query_id _108=3570068941 torrent_hash _107 _130
s1 s3 XCHG // in_msg_body sender_address op query_id _107 torrent_hash _108=3570068941 _130
32 STU // in_msg_body sender_address op query_id _107 torrent_hash _132
s1 s2 XCHG // in_msg_body sender_address op query_id torrent_hash _107 _132
64 STU // in_msg_body sender_address op query_id torrent_hash _134
256 STU // in_msg_body sender_address op query_id _136
ENDC // in_msg_body sender_address op query_id msg
0 PUSHINT // in_msg_body sender_address op query_id msg _138=0
SENDRAWMSG
}> // in_msg_body sender_address op query_id
OVER
2046375914 PUSHINT // in_msg_body sender_address op query_id op _140=2046375914
EQUAL // in_msg_body sender_address op query_id _141
IFJMP:<{ // in_msg_body sender_address op query_id
s2 s3 XCHG
3 BLKDROP // sender_address
c4 PUSH // sender_address _143
CTOS // sender_address ds
1 LDI // sender_address _149 ds
LDGRAMS // sender_address _149 _152 ds
LDMSGADDR // sender_address active balance provider rest
get_client_data CALLDICT // sender_address active balance provider client torrent_hash
s5 s2 PUSH2 // sender_address active balance provider client torrent_hash sender_address provider
SDEQ // sender_address active balance provider client torrent_hash _160
s6 s2 XCPU // _160 active balance provider client torrent_hash sender_address client
SDEQ // _160 active balance provider client torrent_hash _161
s1 s6 XCHG // torrent_hash active balance provider client _160 _161
OR // torrent_hash active balance provider client _162
401 THROWIFNOT
LTIME // torrent_hash active balance provider client _165
3055775075 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075
0 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 _167=0
24 PUSHINT // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _168=24
NEWC // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _168=24 _169
6 STU // torrent_hash active balance provider client _165 _166=3055775075 _167=0 _171
s0 s4 XCHG2 // torrent_hash active balance provider _167=0 _165 _166=3055775075 _171 client
STSLICER // torrent_hash active balance provider _167=0 _165 _166=3055775075 _172
s0 s5 XCHG2 // torrent_hash active _166=3055775075 provider _167=0 _165 _172 balance
STGRAMS // torrent_hash active _166=3055775075 provider _167=0 _165 _173
s1 s2 XCHG // torrent_hash active _166=3055775075 provider _165 _167=0 _173
107 STU // torrent_hash active _166=3055775075 provider _165 _187
s1 s3 XCHG // torrent_hash active _165 provider _166=3055775075 _187
32 STU // torrent_hash active _165 provider _189
s1 s2 XCHG // torrent_hash active provider _165 _189
64 STU // torrent_hash active provider _191
s3 s(-1) PUXC // torrent_hash active provider torrent_hash _191
256 STU // torrent_hash active provider _193
ENDC // torrent_hash active provider client_msg
s0 s2 XCHG // torrent_hash client_msg provider active
NOT // torrent_hash client_msg provider _195
IFJMP:<{ // torrent_hash client_msg provider
DROP
NIP // client_msg
160 PUSHINT // client_msg _198
SENDRAWMSG
}> // torrent_hash client_msg provider
SWAP
64 PUSHINT // torrent_hash provider client_msg _200=64
SENDRAWMSG
LTIME // torrent_hash provider _203
3055775075 PUSHINT // torrent_hash provider _203 _204=3055775075
0 PUSHINT // torrent_hash provider _203 _204=3055775075 _205=0
24 PUSHINT // torrent_hash provider _203 _204=3055775075 _205=0 _206=24
NEWC // torrent_hash provider _203 _204=3055775075 _205=0 _206=24 _207
6 STU // torrent_hash provider _203 _204=3055775075 _205=0 _209
s0 s4 XCHG2 // torrent_hash _205=0 _203 _204=3055775075 _209 provider
STSLICER // torrent_hash _205=0 _203 _204=3055775075 _210
s3 PUSH // torrent_hash _205=0 _203 _204=3055775075 _210 _211=0
STGRAMS // torrent_hash _205=0 _203 _204=3055775075 _212
s1 s3 XCHG // torrent_hash _204=3055775075 _203 _205=0 _212
107 STU // torrent_hash _204=3055775075 _203 _226
s1 s2 XCHG // torrent_hash _203 _204=3055775075 _226
32 STU // torrent_hash _203 _228
64 STU // torrent_hash _230
256 STU // _232
ENDC // provider_msg
160 PUSHINT // provider_msg _236
SENDRAWMSG
}> // in_msg_body sender_address op query_id
OVER
1189949076 PUSHINT // in_msg_body sender_address op query_id op _238=1189949076
EQUAL // in_msg_body sender_address op query_id _239
IF:<{ // in_msg_body sender_address op query_id
c4 PUSH // in_msg_body sender_address op query_id _241
CTOS // in_msg_body sender_address op query_id ds
1 LDI // in_msg_body sender_address op query_id _246 ds
LDGRAMS // in_msg_body sender_address op query_id _246 _249 ds
LDMSGADDR // in_msg_body sender_address op query_id _246 _249 _449 _448
DROP // in_msg_body sender_address op query_id active balance provider
s0 s2 XCHG // in_msg_body sender_address op query_id provider balance active
1003 THROWIFNOT
s4 s1 PUSH2 // in_msg_body sender_address op query_id provider balance sender_address provider
SDEQ // in_msg_body sender_address op query_id provider balance _256
401 THROWIFNOT
DUP // in_msg_body sender_address op query_id provider balance balance
0 GTINT // in_msg_body sender_address op query_id provider balance _259
IF:<{ // in_msg_body sender_address op query_id provider balance
10000000 PUSHINT // in_msg_body sender_address op query_id provider balance _260=10000000
ADD // in_msg_body sender_address op query_id provider _261
2 PUSHINT // in_msg_body sender_address op query_id provider _261 _262=2
RAWRESERVE
}>ELSE<{
DROP // in_msg_body sender_address op query_id provider
}>
2837163862 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862
0 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 _266=0
24 PUSHINT // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _267=24
NEWC // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _267=24 _268
6 STU // in_msg_body sender_address op query_id provider _265=2837163862 _266=0 _270
s0 s3 XCHG2 // in_msg_body sender_address op query_id _266=0 _265=2837163862 _270 provider
STSLICER // in_msg_body sender_address op query_id _266=0 _265=2837163862 _271
20000000 PUSHINT // in_msg_body sender_address op query_id _266=0 _265=2837163862 _271 _272=20000000
STGRAMS // in_msg_body sender_address op query_id _266=0 _265=2837163862 _273
s1 s2 XCHG // in_msg_body sender_address op query_id _265=2837163862 _266=0 _273
107 STU // in_msg_body sender_address op query_id _265=2837163862 _287
32 STU // in_msg_body sender_address op query_id _289
64 STU // in_msg_body sender_address op _291
ENDC // in_msg_body sender_address op msg
160 PUSHINT // in_msg_body sender_address op msg _295
SENDRAWMSG
}>ELSE<{
DROP // in_msg_body sender_address op
}>
1100832077 PUSHINT // in_msg_body sender_address op _297=1100832077
EQUAL // in_msg_body sender_address _298
IF:<{ // in_msg_body sender_address
SWAP // sender_address in_msg_body
LDREF // sender_address _451 _450
DROP // sender_address file_dict_proof
c4 PUSH // sender_address file_dict_proof _303
CTOS // sender_address file_dict_proof ds
1 LDI // sender_address file_dict_proof _315 ds
LDGRAMS // sender_address file_dict_proof _315 _318 ds
LDMSGADDR // sender_address file_dict_proof _315 _318 _320 ds
256 LDU // sender_address file_dict_proof _315 _318 _320 _322 ds
64 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 ds
64 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 ds
LDGRAMS // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 ds
32 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 ds
32 LDU // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 _336 ds
LDREF // sender_address file_dict_proof _315 _318 _320 _322 _325 _328 _331 _333 _336 _471 _470
DROP // sender_address file_dict_proof active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time client_data
s0 s9 XCHG // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time active
1003 THROWIFNOT
s10 s6 PUSH2 // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time sender_address provider
SDEQ // sender_address file_dict_proof client_data balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time _344
401 THROWIFNOT
s5 s3 s(-1) PUXC2
s5 s10 PUXC // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day merkle_hash next_proof file_size file_dict_proof
check_proof CALLDICT // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day _347
1002 THROWIFNOT
s2 PUSH // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day file_size
RAND // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day next_proof
NOW // sender_address max_span client_data balance provider merkle_hash file_size last_proof_time rate_per_mb_day next_proof _351
s0 s3 XCHG2 // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _351 last_proof_time
SUB // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _352
s8 PUSH // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _352 max_span
MIN // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span
s3 s1 PUSH2 // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span file_size rate_per_mb_day
MUL // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day actual_span _355
SWAP
90596966400 PUSHINTX // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day _355 actual_span _364
MULDIV // sender_address max_span client_data balance provider merkle_hash file_size next_proof rate_per_mb_day bounty
s0 s6 XCHG
0 PUSHINT
s0 s7 XCHG // sender_address max_span client_data _366=0 provider merkle_hash file_size next_proof rate_per_mb_day balance bounty
SUB // sender_address max_span client_data _366=0 provider merkle_hash file_size next_proof rate_per_mb_day _367
s1 s6 XCHG // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof _366=0 _367
MAX // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance
NOW // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time
TRUE // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _370
NEWC // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _370 _371
1 STI // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof balance last_proof_time _373
ROT // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof last_proof_time _373 balance
STGRAMS // sender_address max_span client_data rate_per_mb_day provider merkle_hash file_size next_proof last_proof_time _374
s0 s5 XCHG2 // sender_address max_span client_data rate_per_mb_day last_proof_time merkle_hash file_size next_proof _374 provider
STSLICER // sender_address max_span client_data rate_per_mb_day last_proof_time merkle_hash file_size next_proof _375
s1 s3 XCHG // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof file_size merkle_hash _375
256 STU // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof file_size _377
64 STU // sender_address max_span client_data rate_per_mb_day last_proof_time next_proof _379
64 STU // sender_address max_span client_data rate_per_mb_day last_proof_time _381
ROT // sender_address max_span client_data last_proof_time _381 rate_per_mb_day
STGRAMS // sender_address max_span client_data last_proof_time _382
s1 s3 XCHG // sender_address last_proof_time client_data max_span _382
32 STU // sender_address last_proof_time client_data _384
s1 s2 XCHG // sender_address client_data last_proof_time _384
32 STU // sender_address client_data _386
STREF // sender_address _387
ENDC // sender_address _388
c4 POP
0 PUSHINT // sender_address _391=0
24 PUSHINT // sender_address _391=0 _392=24
NEWC // sender_address _391=0 _392=24 _393
6 STU // sender_address _391=0 _395
ROT // _391=0 _395 sender_address
STSLICER // _391=0 _396
111 STU // _412
ENDC // msg
66 PUSHINT // msg _416
SENDRAWMSG
}>ELSE<{
2DROP //
}>
}>
get_storage_contract_data PROC:<{
//
c4 PUSH // _1
CTOS // ds
1 LDI // _13 ds
LDGRAMS // _13 _16 ds
LDMSGADDR // _13 _16 _18 ds
256 LDU // _13 _16 _18 _20 ds
64 LDU // _13 _16 _18 _20 _23 ds
64 LDU // _13 _16 _18 _20 _23 _26 ds
LDGRAMS // _13 _16 _18 _20 _23 _26 _29 ds
32 LDU // _13 _16 _18 _20 _23 _26 _29 _31 ds
32 LDU // active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time rest
get_client_data CALLDICT // active balance provider merkle_hash file_size next_proof rate_per_mb_day max_span last_proof_time client torrent_hash
}>
get_torrent_hash PROC:<{
//
get_storage_contract_data CALLDICT // _12 _13 _14 _15 _16 _17 _18 _19 _20 _21 _22
10 1 BLKDROP2 // torrent_hash
}>
is_active PROC:<{
//
c4 PUSH // _0
CTOS // _1
1 PLDI // _3
}>
get_next_proof_info PROC:<{
//
get_storage_contract_data CALLDICT // _12 _13 _14 _15 _16 _17 _18 _19 _20 _21 _22
2DROP
s2 POP
5 3 BLKDROP2 // next_proof last_proof_time max_span
}>
}END>c


@ -0,0 +1,227 @@
;; Storage contract fabric
#include "constants.fc";
const min_deploy_amount = 50000000;
cell storage_contract_code() asm """ "storage-contract-code.boc" file>B B>boc PUSHREF """;
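;; Returns the addr_std slice (basechain, workchain 0) of the contract whose StateInit is state_init.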
slice calculate_address_by_stateinit(cell state_init) {
return begin_cell().store_uint(4, 3)
.store_int(0, 8)
.store_uint(cell_hash(state_init), 256)
.end_cell()
.begin_parse();
}
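;; Builds the StateInit of a storage contract. Initial data: active = 0, client balance = 0,
;; provider address (this contract), merkle_hash, file_size, next_proof = 0, rate_per_mb_day,
;; max_span, last_proof_time = now(), plus a ref with (client address, torrent_hash).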
cell build_storage_contract_stateinit(int merkle_hash, int file_size, int rate_per_mb_day,
int max_span, slice client, int torrent_hash) {
cell data = begin_cell()
.store_int(0, 1) ;; active
.store_coins(0) ;; client balance
.store_slice(my_address())
.store_uint(merkle_hash, 256)
.store_uint(file_size, 64)
.store_uint(0, 64) ;; next_proof
.store_coins(rate_per_mb_day)
.store_uint(max_span, 32)
.store_uint(now(), 32) ;; last_proof_time
.store_ref(begin_cell()
.store_slice(client)
.store_uint(torrent_hash, 256)
.end_cell())
.end_cell();
cell state_init = begin_cell()
.store_uint(0, 2)
.store_maybe_ref(storage_contract_code())
.store_maybe_ref(data)
.store_uint(0, 1) .end_cell();
return state_init;
}
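;; Validates the request against the current provider settings (accepting new contracts,
;; file size limits, expected rate and max_span) and deploys a new storage contract,
;; forwarding the remaining value of the inbound message to it (send mode 64).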
() deploy_storage_contract (slice client, int query_id, int file_size, int merkle_hash, int torrent_hash,
int expected_rate, int expected_max_span) impure {
var ds = get_data().begin_parse();
var (wallet_data,
accept_new_contracts?,
rate_per_mb_day,
max_span,
minimal_file_size,
maximal_file_size) = (ds~load_bits(32 + 32 + 256),
ds~load_int(1),
ds~load_coins(),
ds~load_uint(32),
ds~load_uint(64),
ds~load_uint(64));
throw_unless(error::no_new_contracts, accept_new_contracts?);
throw_unless(error::file_too_small, file_size >= minimal_file_size);
throw_unless(error::file_too_big, file_size <= maximal_file_size);
throw_unless(error::provider_params_changed, expected_rate == rate_per_mb_day);
throw_unless(error::provider_params_changed, expected_max_span == max_span);
cell state_init = build_storage_contract_stateinit(merkle_hash, file_size, rate_per_mb_day,
max_span, client, torrent_hash);
cell msg = begin_cell()
.store_uint(0x18, 6)
.store_slice(calculate_address_by_stateinit(state_init))
.store_coins(0)
.store_uint(4 + 2, 1 + 4 + 4 + 64 + 32 + 1 + 1 + 1)
.store_ref(state_init)
.store_uint(op::offer_storage_contract, 32)
.store_uint(query_id, 64)
.end_cell();
send_raw_message(msg, 64);
}
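;; Internal messages: op::offer_storage_contract deploys a new storage contract,
;; op::storage_contract_terminated is ignored, op::update_pubkey and op::update_storage_params
;; are accepted only when the contract sends them to itself (i.e. when initiated by the owner
;; through recv_external).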
() recv_internal(int msg_value, cell in_msg_full, slice in_msg_body) impure {
slice cs = in_msg_full.begin_parse();
int flags = cs~load_uint(4);
if ((flags & 1) | in_msg_body.slice_empty?()) { ;; ignore all bounced and empty messages
return ();
}
slice sender_address = cs~load_msg_addr();
int op = in_msg_body~load_uint(32);
if (op == 0) { ;; transfer with text message
return ();
}
int query_id = in_msg_body~load_uint(64);
if(op == op::offer_storage_contract) {
throw_unless(error::not_enough_money, msg_value >= min_deploy_amount);
;; torrent_info piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256)
;; microchunk_hash:(Maybe (## 256)) description:Text = TorrentInfo;
;;
;; new_storage_contract#00000001 query_id:uint64 info:(^ TorrentInfo) microchunk_hash:uint256
;; expected_rate:Coins expected_max_span:uint32 = NewStorageContract;
cell torrent_info = in_msg_body~load_ref();
int torrent_hash = cell_hash(torrent_info);
slice info_cs = torrent_info.begin_parse();
info_cs~skip_bits(32);
int file_size = info_cs~load_uint(64);
int merkle_hash = in_msg_body~load_uint(256);
int expected_rate = in_msg_body~load_coins();
int expected_max_span = in_msg_body~load_uint(32);
deploy_storage_contract(sender_address, query_id, file_size, merkle_hash, torrent_hash,
expected_rate, expected_max_span);
return ();
}
if(op == op::storage_contract_terminated) {
return ();
}
if(op == op::update_pubkey) {
if(~ equal_slice_bits(my_address(), sender_address)) {
return ();
}
var ds = get_data().begin_parse();
var (seqno_subwallet,
_,
non_wallet_data) = (ds~load_bits(32 + 32),
ds~load_uint(256),
ds);
int new_pubkey = in_msg_body~load_uint(256);
set_data(begin_cell()
.store_slice(seqno_subwallet)
.store_uint(new_pubkey, 256)
.store_slice(non_wallet_data)
.end_cell());
}
if(op == op::update_storage_params) {
if(~ equal_slice_bits(my_address(), sender_address)) {
return ();
}
var ds = get_data().begin_parse();
var wallet_data = ds~load_bits(32 + 32 + 256);
var(accept_new_contracts?,
rate_per_mb_day,
max_span,
minimal_file_size,
maximal_file_size) = (in_msg_body~load_int(1),
in_msg_body~load_coins(),
in_msg_body~load_uint(32),
in_msg_body~load_uint(64),
in_msg_body~load_uint(64));
set_data(begin_cell()
.store_slice(wallet_data)
.store_int(accept_new_contracts?, 1)
.store_coins(rate_per_mb_day)
.store_uint(max_span, 32)
.store_uint(minimal_file_size, 64)
.store_uint(maximal_file_size, 64)
.end_cell());
}
}
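;; External messages are handled like in a standard wallet: check valid_until, seqno,
;; subwallet_id and the Ed25519 signature, then send each referenced message with its
;; 8-bit mode and increment the stored seqno.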
() recv_external(slice in_msg) impure {
var signature = in_msg~load_bits(512);
var cs = in_msg;
var (subwallet_id, valid_until, msg_seqno) = (cs~load_uint(32), cs~load_uint(32), cs~load_uint(32));
throw_if(35, valid_until <= now());
var ds = get_data().begin_parse();
var (stored_seqno,
stored_subwallet,
public_key,
non_wallet_data) = (ds~load_uint(32),
ds~load_uint(32),
ds~load_uint(256),
ds);
throw_unless(33, msg_seqno == stored_seqno);
throw_unless(34, subwallet_id == stored_subwallet);
throw_unless(35, check_signature(slice_hash(in_msg), signature, public_key));
accept_message();
cs~touch();
while (cs.slice_refs()) {
var mode = cs~load_uint(8);
send_raw_message(cs~load_ref(), mode);
}
set_data(begin_cell()
.store_uint(stored_seqno + 1, 32)
.store_uint(stored_subwallet, 32)
.store_uint(public_key, 256)
.store_slice(non_wallet_data)
.end_cell());
}
;; Get methods
int seqno() method_id {
return get_data().begin_parse().preload_uint(32);
}
int get_public_key() method_id {
var cs = get_data().begin_parse();
cs~load_uint(64);
return cs.preload_uint(256);
}
;; seqno, subwallet, key
_ get_wallet_params() method_id {
var ds = get_data().begin_parse();
var (stored_seqno, stored_subwallet, public_key) = (ds~load_uint(32), ds~load_uint(32), ds~load_uint(256));
return (stored_seqno, stored_subwallet, public_key);
}
_ get_storage_params() method_id {
var ds = get_data().begin_parse();
var (wallet_data,
accept_new_contracts?,
rate_per_mb_day,
max_span,
minimal_file_size,
maximal_file_size) = (ds~load_bits(32 + 32 + 256),
ds~load_int(1),
ds~load_coins(),
ds~load_uint(32),
ds~load_uint(64),
ds~load_uint(64));
return (accept_new_contracts?, rate_per_mb_day, max_span, minimal_file_size, maximal_file_size);
}
slice get_storage_contract_address(int merkle_hash, int file_size, slice client, int torrent_hash) method_id {
var (_, rate_per_mb_day, max_span, _, _) = get_storage_params();
cell state_init = build_storage_contract_stateinit(merkle_hash, file_size, rate_per_mb_day, max_span, client, torrent_hash);
return calculate_address_by_stateinit(state_init);
}


@ -0,0 +1,340 @@
"Asm.fif" include
// automatically generated from `../../../crypto/smartcont/stdlib.fc` `storage-provider.fc` incl:`constants.fc`
PROGRAM{
DECLPROC calculate_address_by_stateinit
DECLPROC build_storage_contract_stateinit
DECLPROC deploy_storage_contract
DECLPROC recv_internal
DECLPROC recv_external
85143 DECLMETHOD seqno
78748 DECLMETHOD get_public_key
130271 DECLMETHOD get_wallet_params
104346 DECLMETHOD get_storage_params
119729 DECLMETHOD get_storage_contract_address
calculate_address_by_stateinit PROC:<{
// state_init
HASHCU // _1
0 PUSHINT // _1 _2=0
4 PUSHINT // _1 _2=0 _3=4
NEWC // _1 _2=0 _3=4 _4
3 STU // _1 _2=0 _6
8 STI // _1 _8
256 STU // _10
ENDC // _11
CTOS // _12
}>
build_storage_contract_stateinit PROC:<{
// merkle_hash file_size rate_per_mb_day max_span client torrent_hash
NEWC
ROT // merkle_hash file_size rate_per_mb_day max_span torrent_hash _7 client
STSLICER // merkle_hash file_size rate_per_mb_day max_span torrent_hash _8
256 STU // merkle_hash file_size rate_per_mb_day max_span _10
ENDC // merkle_hash file_size rate_per_mb_day max_span _11
NOW // merkle_hash file_size rate_per_mb_day max_span _11 _12
0 PUSHINT // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0
DUP // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _14=0
NEWC // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _14=0 _15
1 STI // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _17
OVER // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _17 _18=0
STGRAMS // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _19
MYADDR // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _19 _20
STSLICER // merkle_hash file_size rate_per_mb_day max_span _11 _12 _13=0 _21
s1 s7 XCHG // _13=0 file_size rate_per_mb_day max_span _11 _12 merkle_hash _21
256 STU // _13=0 file_size rate_per_mb_day max_span _11 _12 _23
s1 s5 XCHG // _13=0 _12 rate_per_mb_day max_span _11 file_size _23
64 STU // _13=0 _12 rate_per_mb_day max_span _11 _25
s1 s5 XCHG // _11 _12 rate_per_mb_day max_span _13=0 _25
64 STU // _11 _12 rate_per_mb_day max_span _27
ROT // _11 _12 max_span _27 rate_per_mb_day
STGRAMS // _11 _12 max_span _28
32 STU // _11 _12 _30
32 STU // _11 _32
STREF // _33
ENDC // data
0 PUSHINT // data _36=0
"storage-contract-code.boc" file>B B>boc PUSHREF // data _36=0 _37
OVER // data _36=0 _37 _38=0
NEWC // data _36=0 _37 _38=0 _39
2 STU // data _36=0 _37 _41
STOPTREF // data _36=0 _42
s1 s2 XCHG // _36=0 data _42
STOPTREF // _36=0 _43
1 STU // _45
ENDC // state_init
}>
deploy_storage_contract PROC:<{
// client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span
c4 PUSH // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _8
CTOS // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds
320 PUSHINT // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds _21
LDSLICEX // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _91 _90
NIP // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span ds
1 LDI // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 ds
LDGRAMS // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 ds
32 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 ds
64 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 _31 ds
64 LDU // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span _23 _26 _28 _31 _101 _100
DROP // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size
s0 s4 XCHG // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span minimal_file_size accept_new_contracts?
1006 THROWIFNOT
s8 s(-1) PUXC // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span file_size minimal_file_size
GEQ // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span maximal_file_size rate_per_mb_day max_span _40
1004 THROWIFNOT
s7 s2 PUXC // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span max_span rate_per_mb_day file_size maximal_file_size
LEQ // client query_id file_size merkle_hash torrent_hash expected_rate expected_max_span max_span rate_per_mb_day _43
1005 THROWIFNOT
s3 s3 XCPU // client query_id file_size merkle_hash torrent_hash rate_per_mb_day expected_max_span max_span expected_rate rate_per_mb_day
EQUAL // client query_id file_size merkle_hash torrent_hash rate_per_mb_day expected_max_span max_span _46
1009 THROWIFNOT
TUCK // client query_id file_size merkle_hash torrent_hash rate_per_mb_day max_span expected_max_span max_span
EQUAL // client query_id file_size merkle_hash torrent_hash rate_per_mb_day max_span _49
1009 THROWIFNOT
2SWAP
s1 s5 XCHG
s1 s6 XCHG // query_id merkle_hash file_size rate_per_mb_day max_span client torrent_hash
build_storage_contract_stateinit CALLDICT // query_id state_init
276580847 PUSHINT // query_id state_init _54=276580847
6 PUSHINT // query_id state_init _54=276580847 _57
24 PUSHINT // query_id state_init _54=276580847 _57 _58=24
NEWC // query_id state_init _54=276580847 _57 _58=24 _59
6 STU // query_id state_init _54=276580847 _57 _61
s3 PUSH // query_id state_init _54=276580847 _57 _61 state_init
calculate_address_by_stateinit CALLDICT // query_id state_init _54=276580847 _57 _61 _62
STSLICER // query_id state_init _54=276580847 _57 _63
0 PUSHINT // query_id state_init _54=276580847 _57 _63 _64=0
STGRAMS // query_id state_init _54=276580847 _57 _65
108 STU // query_id state_init _54=276580847 _81
s1 s2 XCHG // query_id _54=276580847 state_init _81
STREF // query_id _54=276580847 _82
32 STU // query_id _84
64 STU // _86
ENDC // msg
64 PUSHINT // msg _88=64
SENDRAWMSG
}>
recv_internal PROC:<{
SAMEALTSAVE // msg_value in_msg_full in_msg_body
SWAP // msg_value in_msg_body in_msg_full
CTOS // msg_value in_msg_body cs
4 LDU // msg_value in_msg_body flags cs
SWAP
1 PUSHINT // msg_value in_msg_body cs flags _9=1
AND // msg_value in_msg_body cs _10
s2 PUSH // msg_value in_msg_body cs _10 in_msg_body
SEMPTY // msg_value in_msg_body cs _10 _11
OR // msg_value in_msg_body cs _12
IFJMP:<{ // msg_value in_msg_body cs
3 BLKDROP //
}> // msg_value in_msg_body cs
LDMSGADDR // msg_value in_msg_body _141 _140
DROP // msg_value in_msg_body sender_address
SWAP // msg_value sender_address in_msg_body
32 LDU // msg_value sender_address op in_msg_body
OVER // msg_value sender_address op in_msg_body op
0 EQINT // msg_value sender_address op in_msg_body _21
IFJMP:<{ // msg_value sender_address op in_msg_body
4 BLKDROP //
}> // msg_value sender_address op in_msg_body
64 LDU // msg_value sender_address op query_id in_msg_body
s2 PUSH
276580847 PUSHINT // msg_value sender_address op query_id in_msg_body op _26=276580847
EQUAL // msg_value sender_address op query_id in_msg_body _27
IFJMP:<{ // msg_value sender_address op query_id in_msg_body
s2 POP // msg_value sender_address in_msg_body query_id
s0 s3 XCHG
50000000 PUSHINT // query_id sender_address in_msg_body msg_value _29=50000000
GEQ // query_id sender_address in_msg_body _30
1001 THROWIFNOT
LDREF // query_id sender_address torrent_info in_msg_body
OVER // query_id sender_address torrent_info in_msg_body torrent_info
HASHCU // query_id sender_address torrent_info in_msg_body torrent_hash
s0 s2 XCHG // query_id sender_address torrent_hash in_msg_body torrent_info
CTOS // query_id sender_address torrent_hash in_msg_body info_cs
32 PUSHINT // query_id sender_address torrent_hash in_msg_body info_cs _40=32
SDSKIPFIRST // query_id sender_address torrent_hash in_msg_body info_cs
64 LDU // query_id sender_address torrent_hash in_msg_body _149 _148
DROP // query_id sender_address torrent_hash in_msg_body file_size
SWAP // query_id sender_address torrent_hash file_size in_msg_body
256 LDU // query_id sender_address torrent_hash file_size merkle_hash in_msg_body
LDGRAMS // query_id sender_address torrent_hash file_size merkle_hash expected_rate in_msg_body
32 LDU // query_id sender_address torrent_hash file_size merkle_hash expected_rate _155 _154
DROP // query_id sender_address torrent_hash file_size merkle_hash expected_rate expected_max_span
s5 s6 XCHG
s3 s4 XCHG
s2 s3 XCHG // sender_address query_id file_size merkle_hash torrent_hash expected_rate expected_max_span
deploy_storage_contract CALLDICT
}> // msg_value sender_address op query_id in_msg_body
NIP
s3 POP // in_msg_body sender_address op
DUP
3055775075 PUSHINT // in_msg_body sender_address op op _58=3055775075
EQUAL // in_msg_body sender_address op _59
IFJMP:<{ // in_msg_body sender_address op
3 BLKDROP //
}> // in_msg_body sender_address op
DUP
1408453846 PUSHINT // in_msg_body sender_address op op _60=1408453846
EQUAL // in_msg_body sender_address op _61
IF:<{ // in_msg_body sender_address op
MYADDR // in_msg_body sender_address op _62
s2 PUSH // in_msg_body sender_address op _62 sender_address
SDEQ // in_msg_body sender_address op _63
NOT // in_msg_body sender_address op _64
IFJMP:<{ // in_msg_body sender_address op
3 BLKDROP //
RETALT
}> // in_msg_body sender_address op
c4 PUSH // in_msg_body sender_address op _66
CTOS // in_msg_body sender_address op ds
64 LDSLICE // in_msg_body sender_address op _71 ds
256 LDU // in_msg_body sender_address op _71 _159 _158
NIP // in_msg_body sender_address op seqno_subwallet non_wallet_data
s0 s4 XCHG // non_wallet_data sender_address op seqno_subwallet in_msg_body
256 LDU // non_wallet_data sender_address op seqno_subwallet new_pubkey in_msg_body
NEWC // non_wallet_data sender_address op seqno_subwallet new_pubkey in_msg_body _83
s0 s3 XCHG2 // non_wallet_data sender_address op in_msg_body new_pubkey _83 seqno_subwallet
STSLICER // non_wallet_data sender_address op in_msg_body new_pubkey _84
256 STU // non_wallet_data sender_address op in_msg_body _86
s0 s4 XCHG2 // in_msg_body sender_address op _86 non_wallet_data
STSLICER // in_msg_body sender_address op _87
ENDC // in_msg_body sender_address op _88
c4 POP
}> // in_msg_body sender_address op
1422651803 PUSHINT // in_msg_body sender_address op _90=1422651803
EQUAL // in_msg_body sender_address _91
IF:<{ // in_msg_body sender_address
MYADDR // in_msg_body sender_address _92
SWAP // in_msg_body _92 sender_address
SDEQ // in_msg_body _93
NOT // in_msg_body _94
IFJMP:<{ // in_msg_body
DROP //
RETALT
}> // in_msg_body
c4 PUSH // in_msg_body _96
CTOS // in_msg_body ds
320 PUSHINT // in_msg_body ds _104
LDSLICEX // in_msg_body _163 _162
DROP // in_msg_body wallet_data
SWAP // wallet_data in_msg_body
1 LDI // wallet_data _111 in_msg_body
LDGRAMS // wallet_data _111 _114 in_msg_body
32 LDU // wallet_data _111 _114 _116 in_msg_body
64 LDU // wallet_data _111 _114 _116 _119 in_msg_body
64 LDU // wallet_data _111 _114 _116 _119 _173 _172
DROP // wallet_data accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size
NEWC // wallet_data accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size _125
s0 s6 XCHG2 // maximal_file_size accept_new_contracts? rate_per_mb_day max_span minimal_file_size _125 wallet_data
STSLICER // maximal_file_size accept_new_contracts? rate_per_mb_day max_span minimal_file_size _126
s1 s4 XCHG // maximal_file_size minimal_file_size rate_per_mb_day max_span accept_new_contracts? _126
1 STI // maximal_file_size minimal_file_size rate_per_mb_day max_span _128
ROT // maximal_file_size minimal_file_size max_span _128 rate_per_mb_day
STGRAMS // maximal_file_size minimal_file_size max_span _129
32 STU // maximal_file_size minimal_file_size _131
64 STU // maximal_file_size _133
64 STU // _135
ENDC // _136
c4 POP
}>ELSE<{
2DROP //
}>
}>
recv_external PROC:<{
// in_msg
9 PUSHPOW2 // in_msg _3=512
LDSLICEX // signature in_msg
DUP // signature in_msg cs
32 LDU // signature in_msg _9 cs
32 LDU // signature in_msg _9 _12 cs
32 LDU // signature in_msg subwallet_id valid_until msg_seqno cs
s0 s2 XCHG
NOW // signature in_msg subwallet_id cs msg_seqno valid_until _19
LEQ // signature in_msg subwallet_id cs msg_seqno _20
35 THROWIF
c4 PUSH // signature in_msg subwallet_id cs msg_seqno _23
CTOS // signature in_msg subwallet_id cs msg_seqno ds
32 LDU // signature in_msg subwallet_id cs msg_seqno _29 ds
32 LDU // signature in_msg subwallet_id cs msg_seqno _29 _32 ds
256 LDU // signature in_msg subwallet_id cs msg_seqno stored_seqno stored_subwallet public_key non_wallet_data
s4 s3 XCPU // signature in_msg subwallet_id cs non_wallet_data stored_seqno stored_subwallet public_key msg_seqno stored_seqno
EQUAL // signature in_msg subwallet_id cs non_wallet_data stored_seqno stored_subwallet public_key _39
33 THROWIFNOT
s5 s1 XCPU // signature in_msg public_key cs non_wallet_data stored_seqno stored_subwallet subwallet_id stored_subwallet
EQUAL // signature in_msg public_key cs non_wallet_data stored_seqno stored_subwallet _42
34 THROWIFNOT
s0 s5 XCHG // signature stored_subwallet public_key cs non_wallet_data stored_seqno in_msg
HASHSU // signature stored_subwallet public_key cs non_wallet_data stored_seqno _45
s0 s6 s4 XC2PU // stored_seqno stored_subwallet public_key cs non_wallet_data _45 signature public_key
CHKSIGNU // stored_seqno stored_subwallet public_key cs non_wallet_data _46
35 THROWIFNOT
ACCEPT
SWAP // stored_seqno stored_subwallet public_key non_wallet_data cs
WHILE:<{
DUP // stored_seqno stored_subwallet public_key non_wallet_data cs cs
SREFS // stored_seqno stored_subwallet public_key non_wallet_data cs _51
}>DO<{ // stored_seqno stored_subwallet public_key non_wallet_data cs
8 LDU // stored_seqno stored_subwallet public_key non_wallet_data mode cs
LDREF // stored_seqno stored_subwallet public_key non_wallet_data mode _56 cs
s0 s2 XCHG // stored_seqno stored_subwallet public_key non_wallet_data cs _56 mode
SENDRAWMSG
}> // stored_seqno stored_subwallet public_key non_wallet_data cs
DROP // stored_seqno stored_subwallet public_key non_wallet_data
s0 s3 XCHG // non_wallet_data stored_subwallet public_key stored_seqno
INC // non_wallet_data stored_subwallet public_key _60
NEWC // non_wallet_data stored_subwallet public_key _60 _61
32 STU // non_wallet_data stored_subwallet public_key _63
s1 s2 XCHG // non_wallet_data public_key stored_subwallet _63
32 STU // non_wallet_data public_key _65
256 STU // non_wallet_data _67
SWAP // _67 non_wallet_data
STSLICER // _68
ENDC // _69
c4 POP
}>
seqno PROC:<{
//
c4 PUSH // _0
CTOS // _1
32 PLDU // _3
}>
get_public_key PROC:<{
//
c4 PUSH // _1
CTOS // cs
64 LDU // _9 _8
NIP // cs
256 PLDU // _7
}>
get_wallet_params PROC:<{
//
c4 PUSH // _1
CTOS // ds
32 LDU // _6 ds
32 LDU // _6 _9 ds
256 LDU // _6 _9 _20 _19
DROP // stored_seqno stored_subwallet public_key
}>
get_storage_params PROC:<{
//
c4 PUSH // _1
CTOS // ds
320 PUSHINT // ds _14
LDSLICEX // _31 _30
NIP // ds
1 LDI // _16 ds
LDGRAMS // _16 _19 ds
32 LDU // _16 _19 _21 ds
64 LDU // _16 _19 _21 _24 ds
64 LDU // _16 _19 _21 _24 _41 _40
DROP // accept_new_contracts? rate_per_mb_day max_span minimal_file_size maximal_file_size
}>
get_storage_contract_address PROC:<{
// merkle_hash file_size client torrent_hash
get_storage_params CALLDICT // merkle_hash file_size client torrent_hash _13 _14 _15 _16 _17
2DROP
s2 POP // merkle_hash file_size client torrent_hash max_span rate_per_mb_day
s1 s3 s3 XCHG3 // merkle_hash file_size rate_per_mb_day max_span client torrent_hash
build_storage_contract_stateinit CALLDICT // state_init
calculate_address_by_stateinit CALLDICT // _12
}>
}END>c


@ -0,0 +1,487 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "smc-util.h"
#include "td/utils/filesystem.h"
#include "keys/encryptor.h"
#include "smartcont/provider-code.h"
static void smc_forget(td::actor::ActorId<tonlib::TonlibClientWrapper> client, td::int64 id) {
auto query = create_tl_object<tonlib_api::smc_forget>(id);
td::actor::send_closure(client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::smc_forget>, std::move(query),
[](td::Result<tonlib_api::object_ptr<tonlib_api::ok>> R) mutable {
if (R.is_error()) {
LOG(WARNING) << "smc_forget failed: " << R.move_as_error();
}
});
}
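// Runs a get-method on the given contract via tonlib: smc.load, then smc.runGetMethod
// (by method name), then smc.forget to release the cached smc instance. Exit codes other
// than 0 and 1 are reported as errors.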
void run_get_method(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client, std::string method,
std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>> args,
td::Promise<std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>>> promise) {
LOG(DEBUG) << "Running get method " << method << " on " << address.to_string();
auto query =
create_tl_object<tonlib_api::smc_load>(create_tl_object<tonlib_api::accountAddress>(address.to_string()));
td::actor::send_closure(
client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::smc_load>, std::move(query),
[client, method = std::move(method), args = std::move(args),
promise = std::move(promise)](td::Result<tonlib_api::object_ptr<tonlib_api::smc_info>> R) mutable {
TRY_RESULT_PROMISE(promise, obj, std::move(R));
auto query = create_tl_object<tonlib_api::smc_runGetMethod>(
obj->id_, create_tl_object<tonlib_api::smc_methodIdName>(std::move(method)), std::move(args));
td::actor::send_closure(
client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::smc_runGetMethod>, std::move(query),
[client, id = obj->id_,
promise = std::move(promise)](td::Result<tonlib_api::object_ptr<tonlib_api::smc_runResult>> R) mutable {
smc_forget(client, id);
TRY_RESULT_PROMISE(promise, obj, std::move(R));
if (obj->exit_code_ != 0 && obj->exit_code_ != 1) {
promise.set_error(
td::Status::Error(PSTRING() << "Method execution finished with code " << obj->exit_code_));
return;
}
promise.set_result(std::move(obj->stack_));
});
});
}
void check_contract_exists(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::Promise<bool> promise) {
auto query =
create_tl_object<tonlib_api::smc_load>(create_tl_object<tonlib_api::accountAddress>(address.to_string()));
td::actor::send_closure(
client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::smc_load>, std::move(query),
[client, promise = std::move(promise)](td::Result<tonlib_api::object_ptr<tonlib_api::smc_info>> R) mutable {
TRY_RESULT_PROMISE(promise, obj, std::move(R));
auto query = create_tl_object<tonlib_api::smc_getState>(obj->id_);
td::actor::send_closure(
client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::smc_getState>, std::move(query),
[client, id = obj->id_,
promise = std::move(promise)](td::Result<tonlib_api::object_ptr<tonlib_api::tvm_cell>> R) mutable {
smc_forget(client, id);
TRY_RESULT_PROMISE(promise, r, std::move(R));
promise.set_result(!r->bytes_.empty());
});
});
}
void get_contract_balance(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::Promise<td::RefInt256> promise) {
auto query =
create_tl_object<tonlib_api::getAccountState>(create_tl_object<tonlib_api::accountAddress>(address.to_string()));
td::actor::send_closure(
client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::getAccountState>, std::move(query),
promise.wrap([](tonlib_api::object_ptr<tonlib_api::fullAccountState> r) -> td::Result<td::RefInt256> {
return td::make_refint(r->balance_);
}));
}
FabricContractWrapper::FabricContractWrapper(ContractAddress address,
td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::actor::ActorId<keyring::Keyring> keyring,
td::unique_ptr<Callback> callback, td::uint64 last_processed_lt)
: address_(address)
, client_(std::move(client))
, keyring_(std::move(keyring))
, callback_(std::move(callback))
, last_processed_lt_(last_processed_lt) {
}
void FabricContractWrapper::start_up() {
alarm();
}
void FabricContractWrapper::alarm() {
if (process_transactions_at_ && process_transactions_at_.is_in_past()) {
process_transactions_at_ = td::Timestamp::never();
load_transactions();
}
alarm_timestamp().relax(process_transactions_at_);
if (send_message_at_ && send_message_at_.is_in_past()) {
send_message_at_ = td::Timestamp::never();
do_send_external_message();
}
alarm_timestamp().relax(send_message_at_);
}
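// Fetches the current account state and then walks the transaction history backwards
// (in chunks of 10) until last_processed_lt_ is reached or the transactions become too
// old or too numerous; the collected transactions are then processed in chronological
// order by loaded_last_transactions().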
void FabricContractWrapper::load_transactions() {
LOG(DEBUG) << "Loading transactions for " << address_.to_string() << ", last_lt=" << last_processed_lt_;
auto query =
create_tl_object<tonlib_api::getAccountState>(create_tl_object<tonlib_api::accountAddress>(address_.to_string()));
td::actor::send_closure(
client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::getAccountState>, std::move(query),
[SelfId = actor_id(this)](td::Result<tonlib_api::object_ptr<tonlib_api::fullAccountState>> R) mutable {
if (R.is_error()) {
td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, R.move_as_error());
return;
}
auto obj = R.move_as_ok();
td::actor::send_closure(SelfId, &FabricContractWrapper::load_last_transactions,
std::vector<tl_object_ptr<tonlib_api::raw_transaction>>(),
std::move(obj->last_transaction_id_), (td::uint32)obj->sync_utime_);
});
}
void FabricContractWrapper::load_last_transactions(std::vector<tl_object_ptr<tonlib_api::raw_transaction>> transactions,
tl_object_ptr<tonlib_api::internal_transactionId> next_id,
td::uint32 utime) {
if ((td::uint64)next_id->lt_ <= last_processed_lt_) {
loaded_last_transactions(std::make_pair(std::move(transactions), utime));
return;
}
auto query = create_tl_object<tonlib_api::raw_getTransactionsV2>(
nullptr, create_tl_object<tonlib_api::accountAddress>(address_.to_string()), std::move(next_id), 10, false);
td::actor::send_closure(
client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::raw_getTransactionsV2>, std::move(query),
[transactions = std::move(transactions), last_processed_lt = last_processed_lt_, SelfId = actor_id(this),
utime](td::Result<tonlib_api::object_ptr<tonlib_api::raw_transactions>> R) mutable {
if (R.is_error()) {
td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions, R.move_as_error());
return;
}
auto obj = R.move_as_ok();
for (auto& transaction : obj->transactions_) {
if ((td::uint64)transaction->transaction_id_->lt_ <= last_processed_lt ||
(double)transaction->utime_ < td::Clocks::system() - 86400 || transactions.size() >= 1000) {
LOG(DEBUG) << "Stopping loading transactions (too many or too old)";
td::actor::send_closure(SelfId, &FabricContractWrapper::loaded_last_transactions,
std::make_pair(std::move(transactions), utime));
return;
}
LOG(DEBUG) << "Adding trtansaction, lt=" << transaction->transaction_id_->lt_;
transactions.push_back(std::move(transaction));
}
td::actor::send_closure(SelfId, &FabricContractWrapper::load_last_transactions, std::move(transactions),
std::move(obj->previous_transaction_id_), utime);
});
}
void FabricContractWrapper::loaded_last_transactions(
td::Result<std::pair<std::vector<tl_object_ptr<tonlib_api::raw_transaction>>, td::uint32>> R) {
if (R.is_error()) {
LOG(ERROR) << "Error during loading last transactions: " << R.move_as_error();
alarm_timestamp().relax(process_transactions_at_ = td::Timestamp::in(30.0));
return;
}
auto r = R.move_as_ok();
auto transactions = std::move(r.first);
td::uint32 utime = r.second;
LOG(DEBUG) << "Finished loading " << transactions.size() << " transactions. sync_utime=" << utime;
std::reverse(transactions.begin(), transactions.end());
for (tl_object_ptr<tonlib_api::raw_transaction>& transaction : transactions) {
LOG(DEBUG) << "Processing transaction tl=" << transaction->transaction_id_->lt_;
last_processed_lt_ = transaction->transaction_id_->lt_;
// transaction->in_msg_->source_->account_address_.empty() - message is external
if (current_ext_message_ && current_ext_message_.value().sent &&
transaction->in_msg_->source_->account_address_.empty()) {
auto msg_data = dynamic_cast<tonlib_api::msg_dataRaw*>(transaction->in_msg_->msg_data_.get());
if (msg_data == nullptr) {
continue;
}
auto r_body = vm::std_boc_deserialize(msg_data->body_);
if (r_body.is_error()) {
LOG(WARNING) << "Invalid response from tonlib: " << r_body.move_as_error();
continue;
}
td::Ref<vm::Cell> body = r_body.move_as_ok();
vm::CellSlice cs(vm::NoVm(), body);
if (cs.size() < 512 + 96) {
continue;
}
cs.skip_first(512 + 64);
auto seqno = (td::uint32)cs.fetch_ulong(32);
if (seqno != current_ext_message_.value().seqno) {
continue;
}
if (current_ext_message_.value().ext_msg_body_hash != body->get_hash().bits()) {
do_send_external_message_finish(td::Status::Error("Another external message with the same seqno was accepted"));
continue;
}
do_send_external_message_finish(&transaction->out_msgs_);
}
}
for (tl_object_ptr<tonlib_api::raw_transaction>& transaction : transactions) {
callback_->on_transaction(std::move(transaction));
}
if (current_ext_message_ && current_ext_message_.value().sent && current_ext_message_.value().timeout < utime) {
do_send_external_message_finish(td::Status::Error("Timeout"));
}
alarm_timestamp().relax(process_transactions_at_ = td::Timestamp::in(10.0));
}
void FabricContractWrapper::run_get_method(
std::string method, std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>> args,
td::Promise<std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>>> promise) {
::run_get_method(address_, client_, std::move(method), std::move(args), std::move(promise));
}
void FabricContractWrapper::send_internal_message(ContractAddress dest, td::RefInt256 coins, vm::CellSlice body,
td::Promise<td::Unit> promise) {
td::Bits256 body_hash = vm::CellBuilder().append_cellslice(body).finalize_novm()->get_hash().bits();
LOG(DEBUG) << "send_internal_message " << address_.to_string() << " -> " << dest.to_string() << ", " << coins
<< " nanoTON, body=" << body_hash.to_hex();
CHECK(coins->sgn() >= 0);
pending_messages_.push(PendingMessage{dest, std::move(coins), std::move(body), body_hash, std::move(promise)});
if (!send_message_at_ && !current_ext_message_) {
alarm_timestamp().relax(send_message_at_ = td::Timestamp::in(1.0));
}
}
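// Takes up to 4 pending internal messages from the queue, then fetches the wallet's
// current seqno, subwallet_id and public key via the get_wallet_params get-method
// before building and signing a single external message that carries all of them.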
void FabricContractWrapper::do_send_external_message() {
CHECK(!current_ext_message_);
LOG(DEBUG) << "do_send_external message: " << pending_messages_.size() << " messages in queue";
if (pending_messages_.empty()) {
return;
}
current_ext_message_ = CurrentExtMessage();
while (current_ext_message_.value().int_msgs.size() < 4 && !pending_messages_.empty()) {
PendingMessage msg = std::move(pending_messages_.front());
current_ext_message_.value().int_msgs.push_back(std::move(msg));
pending_messages_.pop();
}
run_get_method(
"get_wallet_params", {},
[SelfId = actor_id(this)](td::Result<std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>>> R) {
td::uint32 seqno = 0;
td::uint32 subwallet_id = 0;
td::Bits256 public_key = td::Bits256::zero();
auto S = [&]() -> td::Status {
TRY_RESULT(stack, std::move(R));
if (stack.size() != 3) {
return td::Status::Error(PSTRING() << "Method returned " << stack.size() << " values, 3 expected");
}
TRY_RESULT_PREFIX_ASSIGN(seqno, entry_to_int<td::uint32>(stack[0]), "Invalid seqno: ");
TRY_RESULT_PREFIX_ASSIGN(subwallet_id, entry_to_int<td::uint32>(stack[1]), "Invalid subwallet_id: ");
TRY_RESULT_PREFIX_ASSIGN(public_key, entry_to_bits256(stack[2]), "Invalid public_key: ");
return td::Status::OK();
}();
if (S.is_error()) {
td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish,
S.move_as_error_prefix("Failed to get wallet params: "));
return;
}
td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_cont, seqno, subwallet_id,
public_key);
});
}
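// Builds the external message body for the provider wallet: subwallet_id, valid_until
// (now + 45 s), seqno, and one (mode = 3, ^message) pair per queued internal message;
// the resulting cell hash is then signed by the keyring with the wallet's key.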
void FabricContractWrapper::do_send_external_message_cont(td::uint32 seqno, td::uint32 subwallet_id,
td::Bits256 public_key) {
LOG(DEBUG) << "Got wallet params: seqno=" << seqno << ", subwallet_id=" << subwallet_id
<< ", key=" << public_key.to_hex();
CHECK(current_ext_message_);
current_ext_message_.value().seqno = seqno;
current_ext_message_.value().timeout = (td::uint32)td::Clocks::system() + 45;
vm::CellBuilder b;
b.store_long(subwallet_id, 32); // subwallet id.
b.store_long(current_ext_message_.value().timeout, 32); // valid until
b.store_long(seqno, 32); // seqno
for (const PendingMessage& msg : current_ext_message_.value().int_msgs) {
vm::CellBuilder b2;
b2.store_long(3 << 2, 6); // 0 ihr_disabled:Bool bounce:Bool bounced:Bool src:MsgAddressInt
b2.append_cellslice(msg.dest.to_cellslice()); // dest:MsgAddressInt
store_coins(b2, msg.value); // grams:Grams
b2.store_zeroes(1 + 4 + 4 + 64 + 32 + 1); // extra currencies, ihr_fee, fwd_fee, created_lt, created_at, init
// body:(Either X ^X)
if (b2.remaining_bits() >= 1 + msg.body.size() && b2.remaining_refs() >= msg.body.size_refs()) {
b2.store_zeroes(1);
b2.append_cellslice(msg.body);
} else {
b2.store_ones(1);
b2.store_ref(vm::CellBuilder().append_cellslice(msg.body).finalize_novm());
}
b.store_long(3, 8); // mode
b.store_ref(b2.finalize_novm()); // message
}
td::Ref<vm::Cell> to_sign = b.finalize_novm();
td::BufferSlice hash(to_sign->get_hash().as_slice());
LOG(DEBUG) << "Signing external message";
td::actor::send_closure(
keyring_, &keyring::Keyring::sign_message, PublicKey(pubkeys::Ed25519(public_key)).compute_short_id(),
std::move(hash), [SelfId = actor_id(this), data = std::move(to_sign)](td::Result<td::BufferSlice> R) mutable {
if (R.is_error()) {
td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish,
R.move_as_error_prefix("Failed to sign message: "));
return;
}
auto signature = R.move_as_ok();
CHECK(signature.size() == 64);
vm::CellBuilder b;
b.store_bytes(signature);
b.append_cellslice(vm::load_cell_slice(data));
td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_cont2, b.finalize_novm());
});
}
void FabricContractWrapper::do_send_external_message_cont2(td::Ref<vm::Cell> ext_msg_body) {
CHECK(current_ext_message_);
LOG(DEBUG) << "Signed external message, sending: seqno=" << current_ext_message_.value().seqno;
current_ext_message_.value().sent = true;
current_ext_message_.value().ext_msg_body_hash = ext_msg_body->get_hash().bits();
auto body = vm::std_boc_serialize(ext_msg_body).move_as_ok().as_slice().str();
auto query = create_tl_object<tonlib_api::raw_createAndSendMessage>(
create_tl_object<tonlib_api::accountAddress>(address_.to_string()), "", std::move(body));
td::actor::send_closure(client_, &tonlib::TonlibClientWrapper::send_request<tonlib_api::raw_createAndSendMessage>,
std::move(query), [SelfId = actor_id(this)](td::Result<tl_object_ptr<tonlib_api::ok>> R) {
if (R.is_error()) {
td::actor::send_closure(SelfId, &FabricContractWrapper::do_send_external_message_finish,
R.move_as_error_prefix("Failed to send message: "));
} else {
LOG(DEBUG) << "External message was sent to liteserver";
}
});
}
void FabricContractWrapper::do_send_external_message_finish(
td::Result<const std::vector<tl_object_ptr<tonlib_api::raw_message>>*> R) {
CHECK(current_ext_message_);
if (R.is_error()) {
LOG(DEBUG) << "Failed to send external message seqno=" << current_ext_message_.value().seqno << ": " << R.error();
for (auto& msg : current_ext_message_.value().int_msgs) {
msg.promise.set_error(R.error().clone());
}
} else {
LOG(DEBUG) << "External message seqno=" << current_ext_message_.value().seqno << " was sent";
const auto& out_msgs = *R.ok();
auto& msgs = current_ext_message_.value().int_msgs;
for (const auto& out_msg : out_msgs) {
ContractAddress dest = ContractAddress::parse(out_msg->destination_->account_address_).move_as_ok();
td::RefInt256 value = td::make_refint((td::uint64)out_msg->value_);
td::Bits256 body_hash;
body_hash.as_slice().copy_from(out_msg->body_hash_);
bool found = false;
for (size_t i = 0; i < msgs.size(); ++i) {
if (msgs[i].dest == dest && msgs[i].value->cmp(*value) == 0 && msgs[i].body_hash == body_hash) {
LOG(DEBUG) << "Internal message was sent dest=" << dest.to_string() << ", value=" << value
<< ", body_hash=" << body_hash.to_hex();
msgs[i].promise.set_result(td::Unit());
msgs.erase(msgs.begin() + i);
found = true;
break;
}
}
if (!found) {
LOG(DEBUG) << "Unexpected internal message was sent: dest=" << dest.to_string() << " value=" << value
<< " body_hash=" << body_hash;
}
}
for (auto& msg : msgs) {
LOG(DEBUG) << "Internal message WAS NOT SENT dest=" << msg.dest.to_string() << ", value=" << msg.value
<< ", body_hash=" << msg.body_hash.to_hex();
msg.promise.set_result(td::Status::Error("External message was accepted, but internal message was not sent"));
}
}
current_ext_message_ = {};
if (!pending_messages_.empty()) {
do_send_external_message();
}
}
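// Serializes x as Grams (VarUInteger 16): a 4-bit byte-length prefix followed by
// len * 8 bits of the value; fails if the value does not fit into 15 bytes.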
bool store_coins(vm::CellBuilder& b, const td::RefInt256& x) {
unsigned len = (((unsigned)x->bit_size(false) + 7) >> 3);
if (len >= 16) {
return false;
}
return b.store_long_bool(len, 4) && b.store_int256_bool(*x, len * 8, false);
}
bool store_coins(vm::CellBuilder& b, td::uint64 x) {
return store_coins(b, td::make_refint(x));
}
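// Generates a fresh Ed25519 key (stored in the keyring), builds the initial data of the
// storage provider wallet (seqno = 0, subwallet_id = 0, accepting no new contracts, default
// rate and file size limits), computes its basechain address and signs the external message
// that will deploy the contract.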
td::Result<FabricContractInit> generate_fabric_contract(td::actor::ActorId<keyring::Keyring> keyring) {
auto private_key = PrivateKey{privkeys::Ed25519::random()};
td::Bits256 public_key = private_key.compute_public_key().ed25519_value().raw();
td::Slice code_boc(STORAGE_PROVIDER_CODE, sizeof(STORAGE_PROVIDER_CODE));
TRY_RESULT(code, vm::std_boc_deserialize(code_boc));
LOG(DEBUG) << "Generating storage provider state init. code_hash=" << code->get_hash().to_hex()
<< " public_key=" << public_key.to_hex();
vm::CellBuilder b;
b.store_long(0, 32); // seqno
b.store_long(0, 32); // subwallet_id
b.store_bytes(public_key.as_slice()); // public_key
b.store_long(0, 1); // accept_new_contracts (false by default)
store_coins(b, 1'000'000); // rate_per_mb_day
b.store_long(86400, 32); // max_span
b.store_long(1 << 20, 64); // min_file_size
b.store_long(1 << 30, 64); // max_file_size
td::Ref<vm::Cell> data = b.finalize_novm();
// _ split_depth:(Maybe (## 5)) special:(Maybe TickTock)
// code:(Maybe ^Cell) data:(Maybe ^Cell)
// library:(HashmapE 256 SimpleLib) = StateInit;
td::Ref<vm::Cell> state_init =
vm::CellBuilder().store_long(0b00110, 5).store_ref(std::move(code)).store_ref(std::move(data)).finalize_novm();
ContractAddress address{basechainId, state_init->get_hash().bits()};
// Message body
b = vm::CellBuilder();
b.store_long(0, 32); // subwallet_id
b.store_long((td::uint32)td::Clocks::system() + 3600 * 24 * 7, 32); // valid_until
b.store_long(0, 32); // seqno
td::Ref<vm::Cell> to_sign = b.finalize_novm();
TRY_RESULT(decryptor, private_key.create_decryptor());
TRY_RESULT(signature, decryptor->sign(to_sign->get_hash().as_slice()));
CHECK(signature.size() == 64);
td::Ref<vm::Cell> msg_body =
vm::CellBuilder().store_bytes(signature).append_cellslice(vm::CellSlice(vm::NoVm(), to_sign)).finalize_novm();
td::actor::send_closure(keyring, &keyring::Keyring::add_key, private_key, false,
[](td::Result<td::Unit> R) { R.ensure(); });
return FabricContractInit{address, state_init, msg_body};
}
td::Ref<vm::Cell> create_new_contract_message_body(td::Ref<vm::Cell> info, td::Bits256 microchunk_hash,
td::uint64 query_id, td::RefInt256 rate, td::uint32 max_span) {
// new_storage_contract#00000001 query_id:uint64 info:(^ TorrentInfo) microchunk_hash:uint256
// expected_rate:Coins expected_max_span:uint32 = NewStorageContract;
vm::CellBuilder b;
b.store_long(0x107c49ef, 32); // const op::offer_storage_contract = 0x107c49ef;
b.store_long(query_id, 64);
b.store_ref(std::move(info));
b.store_bytes(microchunk_hash.as_slice());
store_coins(b, rate);
b.store_long(max_span, 32);
return b.finalize_novm();
}
void get_storage_contract_data(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::Promise<StorageContractData> promise) {
run_get_method(
address, client, "get_storage_contract_data", {},
promise.wrap([](std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>> stack) -> td::Result<StorageContractData> {
if (stack.size() < 11) {
return td::Status::Error("Too few entries");
}
// active, balance, provider, merkle_hash, file_size, next_proof, rate_per_mb_day, max_span, last_proof_time,
// client, torrent_hash
TRY_RESULT(active, entry_to_int<int>(stack[0]));
TRY_RESULT(balance, entry_to_int<td::RefInt256>(stack[1]));
TRY_RESULT(microchunk_hash, entry_to_bits256(stack[3]));
TRY_RESULT(file_size, entry_to_int<td::uint64>(stack[4]));
TRY_RESULT(next_proof, entry_to_int<td::uint64>(stack[5]));
TRY_RESULT(rate_per_mb_day, entry_to_int<td::RefInt256>(stack[6]));
TRY_RESULT(max_span, entry_to_int<td::uint32>(stack[7]));
TRY_RESULT(last_proof_time, entry_to_int<td::uint32>(stack[8]));
TRY_RESULT(torrent_hash, entry_to_bits256(stack[10]));
return StorageContractData{(bool)active, balance, microchunk_hash, file_size, next_proof,
rate_per_mb_day, max_span, last_proof_time, torrent_hash};
}));
}


@ -0,0 +1,184 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include "ton/ton-types.h"
#include "crypto/vm/cellslice.h"
#include "block/block-parse.h"
#include "td/actor/actor.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"
#include <queue>
#include "keyring/keyring.h"
using namespace ton;
struct ContractAddress {
WorkchainId wc = workchainIdNotYet;
td::Bits256 addr = td::Bits256::zero();
ContractAddress() = default;
ContractAddress(WorkchainId wc, td::Bits256 addr) : wc(wc), addr(addr) {
}
std::string to_string() const {
return PSTRING() << wc << ":" << addr.to_hex();
}
td::Ref<vm::CellSlice> to_cellslice() const {
return block::tlb::t_MsgAddressInt.pack_std_address(wc, addr);
}
static td::Result<ContractAddress> parse(td::Slice s) {
TRY_RESULT(x, block::StdAddress::parse(s));
return ContractAddress(x.workchain, x.addr);
}
bool operator==(const ContractAddress& other) const {
return wc == other.wc && addr == other.addr;
}
bool operator!=(const ContractAddress& other) const {
return !(*this == other);
}
bool operator<(const ContractAddress& other) const {
return wc == other.wc ? addr < other.addr : wc < other.wc;
}
};
void run_get_method(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client, std::string method,
std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>> args,
td::Promise<std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>>> promise);
void check_contract_exists(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::Promise<bool> promise);
void get_contract_balance(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::Promise<td::RefInt256> promise);
class FabricContractWrapper : public td::actor::Actor {
public:
class Callback {
public:
virtual ~Callback() = default;
virtual void on_transaction(tl_object_ptr<tonlib_api::raw_transaction> transaction) = 0;
};
explicit FabricContractWrapper(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::actor::ActorId<keyring::Keyring> keyring, td::unique_ptr<Callback> callback,
td::uint64 last_processed_lt);
void start_up() override;
void alarm() override;
void run_get_method(std::string method, std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>> args,
td::Promise<std::vector<tl_object_ptr<tonlib_api::tvm_StackEntry>>> promise);
void send_internal_message(ContractAddress dest, td::RefInt256 coins, vm::CellSlice body,
td::Promise<td::Unit> promise);
private:
ContractAddress address_;
td::actor::ActorId<tonlib::TonlibClientWrapper> client_;
td::actor::ActorId<keyring::Keyring> keyring_;
td::unique_ptr<Callback> callback_;
td::Timestamp process_transactions_at_ = td::Timestamp::now();
td::uint64 last_processed_lt_ = 0;
struct PendingMessage {
ContractAddress dest;
td::RefInt256 value;
vm::CellSlice body;
td::Bits256 body_hash;
td::Promise<td::Unit> promise;
};
struct CurrentExtMessage {
std::vector<PendingMessage> int_msgs;
td::uint32 seqno = 0;
bool sent = false;
td::Bits256 ext_msg_body_hash = td::Bits256::zero();
td::uint32 timeout = 0;
};
std::queue<PendingMessage> pending_messages_;
td::Timestamp send_message_at_ = td::Timestamp::never();
td::optional<CurrentExtMessage> current_ext_message_;
void load_transactions();
void load_last_transactions(std::vector<tl_object_ptr<tonlib_api::raw_transaction>> transactions,
tl_object_ptr<tonlib_api::internal_transactionId> next_id, td::uint32 utime);
void loaded_last_transactions(
td::Result<std::pair<std::vector<tl_object_ptr<tonlib_api::raw_transaction>>, td::uint32>> R);
void do_send_external_message();
void do_send_external_message_cont(td::uint32 seqno, td::uint32 subwallet_id, td::Bits256 public_key);
void do_send_external_message_cont2(td::Ref<vm::Cell> ext_msg_body);
void do_send_external_message_finish(td::Result<const std::vector<tl_object_ptr<tonlib_api::raw_message>>*> R);
};
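// Helpers for decoding TVM stack entries returned by tonlib get-methods.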
template <typename T>
inline td::Result<T> entry_to_int(const tl_object_ptr<tonlib_api::tvm_StackEntry>& entry) {
auto num = dynamic_cast<tonlib_api::tvm_stackEntryNumber*>(entry.get());
if (num == nullptr) {
return td::Status::Error("Unexpected value type");
}
return td::to_integer_safe<T>(num->number_->number_);
}
template <>
inline td::Result<td::RefInt256> entry_to_int<td::RefInt256>(const tl_object_ptr<tonlib_api::tvm_StackEntry>& entry) {
auto num = dynamic_cast<tonlib_api::tvm_stackEntryNumber*>(entry.get());
if (num == nullptr) {
return td::Status::Error("Unexpected value type");
}
auto x = td::dec_string_to_int256(num->number_->number_);
if (x.is_null()) {
return td::Status::Error("Invalid integer value");
}
return x;
}
inline td::Result<td::Bits256> entry_to_bits256(const tl_object_ptr<tonlib_api::tvm_StackEntry>& entry) {
TRY_RESULT(x, entry_to_int<td::RefInt256>(entry));
td::Bits256 bits;
if (!x->export_bytes(bits.data(), 32, false)) {
return td::Status::Error("Invalid int256");
}
return bits;
}
bool store_coins(vm::CellBuilder& b, const td::RefInt256& x);
bool store_coins(vm::CellBuilder& b, td::uint64 x);
struct FabricContractInit {
ContractAddress address;
td::Ref<vm::Cell> state_init;
td::Ref<vm::Cell> msg_body;
};
td::Result<FabricContractInit> generate_fabric_contract(td::actor::ActorId<keyring::Keyring> keyring);
td::Ref<vm::Cell> create_new_contract_message_body(td::Ref<vm::Cell> info, td::Bits256 microchunk_hash,
td::uint64 query_id, td::RefInt256 rate, td::uint32 max_span);
struct StorageContractData {
bool active;
td::RefInt256 balance;
td::Bits256 microchunk_hash;
td::uint64 file_size;
td::uint64 next_proof;
td::RefInt256 rate_per_mb_day;
td::uint32 max_span;
td::uint32 last_proof_time;
td::Bits256 torrent_hash;
};
void get_storage_contract_data(ContractAddress address, td::actor::ActorId<tonlib::TonlibClientWrapper> client,
td::Promise<StorageContractData> promise);

File diff suppressed because it is too large


@ -0,0 +1,937 @@
/*
This file is part of TON Blockchain Library.
TON Blockchain Library is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
TON Blockchain Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
*/
#include "td/utils/filesystem.h"
#include "td/actor/actor.h"
#include "td/actor/MultiPromise.h"
#include "td/utils/OptionParser.h"
#include "td/utils/port/path.h"
#include "td/utils/port/signals.h"
#include "td/utils/port/user.h"
#include "td/utils/port/IPAddress.h"
#include "td/utils/Random.h"
#include "td/utils/FileLog.h"
#include "checksum.h"
#include "git.h"
#include "auto/tl/ton_api_json.h"
#include "common/delay.h"
#include "adnl/adnl.h"
#include "rldp2/rldp.h"
#include "dht/dht.h"
#include "overlay/overlays.h"
#include "Torrent.h"
#include "TorrentCreator.h"
#include "StorageManager.h"
#include "StorageProvider.h"
#if TD_DARWIN || TD_LINUX
#include <unistd.h>
#endif
#include <iostream>
using namespace ton;
td::BufferSlice create_query_error(td::CSlice message) {
return create_serialize_tl_object<ton_api::storage_daemon_queryError>(message.str());
}
td::BufferSlice create_query_error(td::Status error) {
return create_query_error(error.message());
}
class StorageDaemon : public td::actor::Actor {
public:
StorageDaemon(td::IPAddress ip_addr, bool client_mode, std::string global_config, std::string db_root,
td::uint16 control_port, bool enable_storage_provider)
: ip_addr_(ip_addr)
, client_mode_(client_mode)
, global_config_(std::move(global_config))
, db_root_(std::move(db_root))
, control_port_(control_port)
, enable_storage_provider_(enable_storage_provider) {
}
void start_up() override {
CHECK(db_root_ != "");
td::mkdir(db_root_).ensure();
keyring_ = keyring::Keyring::create(db_root_ + "/keyring");
{
auto S = load_global_config();
if (S.is_error()) {
LOG(FATAL) << "Failed to load global config: " << S;
}
}
{
auto S = load_daemon_config();
if (S.is_error()) {
LOG(FATAL) << "Failed to load daemon config: " << S;
}
}
init_adnl();
class Callback : public StorageManager::Callback {
public:
explicit Callback(td::actor::ActorId<StorageDaemon> actor) : actor_(std::move(actor)) {
}
void on_ready() override {
td::actor::send_closure(actor_, &StorageDaemon::inited_storage_manager);
}
private:
td::actor::ActorId<StorageDaemon> actor_;
};
manager_ = td::actor::create_actor<StorageManager>("storage", local_id_, db_root_ + "/torrent",
td::make_unique<Callback>(actor_id(this)), client_mode_,
adnl_.get(), rldp_.get(), overlays_.get());
}
td::Status load_global_config() {
TRY_RESULT_PREFIX(conf_data, td::read_file(global_config_), "failed to read: ");
TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: ");
ton_api::config_global conf;
TRY_STATUS_PREFIX(ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: ");
if (!conf.dht_) {
return td::Status::Error(ErrorCode::error, "does not contain [dht] section");
}
TRY_RESULT_PREFIX(dht, dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: ");
dht_config_ = std::move(dht);
return td::Status::OK();
}
td::Status load_daemon_config() {
daemon_config_ = create_tl_object<ton_api::storage_daemon_config>();
auto r_conf_data = td::read_file(daemon_config_file());
if (r_conf_data.is_ok()) {
auto conf_data = r_conf_data.move_as_ok();
TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: ");
TRY_STATUS_PREFIX(ton_api::from_json(*daemon_config_, conf_json.get_object()), "json does not fit TL scheme: ");
return td::Status::OK();
}
std::string keys_dir = db_root_ + "/cli-keys/";
LOG(INFO) << "First launch, storing keys for storage-daemon-cli to " << keys_dir;
td::mkdir(keys_dir).ensure();
auto generate_public_key = [&]() -> PublicKey {
auto pk = PrivateKey{privkeys::Ed25519::random()};
auto pub = pk.compute_public_key();
td::actor::send_closure(keyring_, &keyring::Keyring::add_key, std::move(pk), false, [](td::Unit) {});
return pub;
};
{
// Server key
daemon_config_->server_key_ = generate_public_key().tl();
TRY_STATUS(td::write_file(keys_dir + "server.pub", serialize_tl_object(daemon_config_->server_key_, true)));
}
{
// Client key
auto pk = PrivateKey{privkeys::Ed25519::random()};
daemon_config_->cli_key_hash_ = pk.compute_short_id().bits256_value();
TRY_STATUS(td::write_file(keys_dir + "client", serialize_tl_object(pk.tl(), true)));
}
daemon_config_->adnl_id_ = generate_public_key().tl();
daemon_config_->dht_id_ = generate_public_key().tl();
return save_daemon_config();
}
td::Status save_daemon_config() {
auto s = td::json_encode<std::string>(td::ToJson(*daemon_config_), true);
TRY_STATUS_PREFIX(td::write_file(daemon_config_file(), s), "Failed to write daemon config: ");
return td::Status::OK();
}
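// Sets up networking: binds the UDP port, registers the daemon's permanent ADNL id and a
// separate DHT id (the address list is published only in server mode), creates a DHT client
// or a full DHT node depending on client_mode_, and starts RLDP and overlays on top of ADNL.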
void init_adnl() {
CHECK(ip_addr_.is_valid());
adnl_network_manager_ = adnl::AdnlNetworkManager::create(static_cast<td::uint16>(ip_addr_.get_port()));
adnl_ = adnl::Adnl::create(db_root_, keyring_.get());
td::actor::send_closure(adnl_, &adnl::Adnl::register_network_manager, adnl_network_manager_.get());
adnl::AdnlCategoryMask cat_mask;
cat_mask[0] = true;
td::actor::send_closure(adnl_network_manager_, &adnl::AdnlNetworkManager::add_self_addr, ip_addr_,
std::move(cat_mask), 0);
adnl::AdnlAddressList addr_list;
if (!client_mode_) {
addr_list.add_udp_address(ip_addr_).ensure();
}
addr_list.set_version(static_cast<td::int32>(td::Clocks::system()));
addr_list.set_reinit_date(adnl::Adnl::adnl_start_time());
adnl::AdnlNodeIdFull local_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->adnl_id_).move_as_ok();
local_id_ = local_id_full.compute_short_id();
td::actor::send_closure(adnl_, &adnl::Adnl::add_id, local_id_full, addr_list, static_cast<td::uint8>(0));
adnl::AdnlNodeIdFull dht_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->dht_id_).move_as_ok();
dht_id_ = dht_id_full.compute_short_id();
td::actor::send_closure(adnl_, &adnl::Adnl::add_id, dht_id_full, addr_list, static_cast<td::uint8>(0));
if (client_mode_) {
auto D = dht::Dht::create_client(dht_id_, db_root_, dht_config_, keyring_.get(), adnl_.get());
D.ensure();
dht_ = D.move_as_ok();
} else {
auto D = dht::Dht::create(dht_id_, db_root_, dht_config_, keyring_.get(), adnl_.get());
D.ensure();
dht_ = D.move_as_ok();
}
td::actor::send_closure(adnl_, &adnl::Adnl::register_dht_node, dht_.get());
rldp_ = ton_rldp::Rldp::create(adnl_.get());
td::actor::send_closure(rldp_, &ton_rldp::Rldp::add_id, local_id_);
overlays_ = overlay::Overlays::create(db_root_, keyring_.get(), adnl_.get(), dht_.get());
}
void inited_storage_manager() {
if (enable_storage_provider_) {
if (!daemon_config_->provider_address_.empty()) {
auto provider_account = ContractAddress::parse(daemon_config_->provider_address_).move_as_ok();
init_tonlib_client();
provider_ = td::actor::create_actor<StorageProvider>("provider", provider_account, db_root_ + "/provider",
tonlib_client_.get(), manager_.get(), keyring_.get());
} else {
LOG(WARNING) << "Storage provider account is not set, it can be set in storage-daemon-cli";
}
}
init_control_interface();
}
void init_control_interface() {
if (control_port_ == 0) {
return;
}
auto adnl_id_full = adnl::AdnlNodeIdFull::create(daemon_config_->server_key_).move_as_ok();
auto adnl_id = adnl_id_full.compute_short_id();
td::actor::send_closure(adnl_, &adnl::Adnl::add_id, adnl_id_full, adnl::AdnlAddressList(),
static_cast<td::uint8>(255));
class Callback : public adnl::Adnl::Callback {
public:
explicit Callback(td::actor::ActorId<StorageDaemon> id) : self_id_(id) {
}
void receive_message(adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data) override {
}
void receive_query(adnl::AdnlNodeIdShort src, adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) override {
td::actor::send_closure(self_id_, &StorageDaemon::process_control_query, src, std::move(data),
std::move(promise));
}
private:
td::actor::ActorId<StorageDaemon> self_id_;
};
td::actor::send_closure(adnl_, &adnl::Adnl::subscribe, adnl_id, "", std::make_unique<Callback>(actor_id(this)));
td::actor::send_closure(adnl_, &adnl::Adnl::create_ext_server, std::vector<adnl::AdnlNodeIdShort>{adnl_id},
std::vector<td::uint16>{control_port_},
[SelfId = actor_id(this)](td::Result<td::actor::ActorOwn<adnl::AdnlExtServer>> R) {
if (R.is_error()) {
LOG(ERROR) << "Failed to init control interface: " << R.move_as_error();
return;
}
td::actor::send_closure(SelfId, &StorageDaemon::created_ext_server, R.move_as_ok());
});
}
void created_ext_server(td::actor::ActorOwn<adnl::AdnlExtServer> ext_server) {
ext_server_ = std::move(ext_server);
LOG(INFO) << "Started control interface on port " << control_port_;
}
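// Control queries arrive through the ADNL ext server. Only the peer whose key hash matches
// cli_key_hash_ from the daemon config is authorized; failures are returned as storage.daemon.queryError.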
void process_control_query(adnl::AdnlNodeIdShort src, td::BufferSlice data, td::Promise<td::BufferSlice> promise) {
promise = [promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
if (R.is_error()) {
promise.set_value(create_query_error(R.move_as_error()));
} else {
promise.set_value(R.move_as_ok());
}
};
if (src.bits256_value() != daemon_config_->cli_key_hash_) {
promise.set_error(td::Status::Error("Not authorized"));
return;
}
auto F = fetch_tl_object<ton_api::Function>(data, true);
if (F.is_error()) {
promise.set_error(F.move_as_error_prefix("failed to parse control query: "));
return;
}
auto f = F.move_as_ok();
LOG(DEBUG) << "Running control query " << f->get_id();
ton_api::downcast_call(*f, [&](auto &obj) { run_control_query(obj, std::move(promise)); });
}
void run_control_query(ton_api::storage_daemon_setVerbosity &query, td::Promise<td::BufferSlice> promise) {
if (query.verbosity_ < 0 || query.verbosity_ > 10) {
promise.set_value(create_query_error("verbosity should be in range [0..10]"));
return;
}
SET_VERBOSITY_LEVEL(VERBOSITY_NAME(FATAL) + query.verbosity_);
promise.set_result(create_serialize_tl_object<ton_api::storage_daemon_success>());
}
void run_control_query(ton_api::storage_daemon_createTorrent &query, td::Promise<td::BufferSlice> promise) {
// Run in a separate thread
delay_action(
[promise = std::move(promise), manager = manager_.get(), query = std::move(query)]() mutable {
Torrent::Creator::Options options;
options.piece_size = 128 * 1024;
options.description = std::move(query.description_);
TRY_RESULT_PROMISE(promise, torrent, Torrent::Creator::create_from_path(std::move(options), query.path_));
td::Bits256 hash = torrent.get_hash();
td::actor::send_closure(manager, &StorageManager::add_torrent, std::move(torrent), false,
[manager, hash, promise = std::move(promise)](td::Result<td::Unit> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
} else {
get_torrent_info_full_serialized(manager, hash, std::move(promise));
}
});
},
td::Timestamp::now());
}
void run_control_query(ton_api::storage_daemon_addByHash &query, td::Promise<td::BufferSlice> promise) {
td::Bits256 hash = query.hash_;
bool start_download_now = query.start_download_ && query.priorities_.empty();
td::actor::send_closure(
manager_, &StorageManager::add_torrent_by_hash, hash, std::move(query.root_dir_), start_download_now,
query_add_torrent_cont(hash, query.start_download_, std::move(query.priorities_), std::move(promise)));
}
void run_control_query(ton_api::storage_daemon_addByMeta &query, td::Promise<td::BufferSlice> promise) {
TRY_RESULT_PROMISE(promise, meta, TorrentMeta::deserialize(query.meta_));
td::Bits256 hash(meta.info.get_hash());
bool start_download_now = query.start_download_ && query.priorities_.empty();
td::actor::send_closure(
manager_, &StorageManager::add_torrent_by_meta, std::move(meta), std::move(query.root_dir_), start_download_now,
query_add_torrent_cont(hash, query.start_download_, std::move(query.priorities_), std::move(promise)));
}
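// Continuation for addByHash/addByMeta: after the torrent is added, apply the requested file
// priorities, optionally start the download, and reply with the full torrent info.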
td::Promise<td::Unit> query_add_torrent_cont(td::Bits256 hash, bool start_download,
std::vector<tl_object_ptr<ton_api::storage_PriorityAction>> priorities,
td::Promise<td::BufferSlice> promise) {
return [manager = manager_.get(), hash, start_download = start_download, priorities = std::move(priorities),
promise = std::move(promise)](td::Result<td::Unit> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
return;
}
if (!priorities.empty()) {
for (auto &p : priorities) {
ton_api::downcast_call(
*p, td::overloaded(
[&](ton_api::storage_priorityAction_all &obj) {
td::actor::send_closure(manager, &StorageManager::set_all_files_priority, hash,
(td::uint8)obj.priority_, [](td::Result<bool>) {});
},
[&](ton_api::storage_priorityAction_idx &obj) {
td::actor::send_closure(manager, &StorageManager::set_file_priority_by_idx, hash, obj.idx_,
(td::uint8)obj.priority_, [](td::Result<bool>) {});
},
[&](ton_api::storage_priorityAction_name &obj) {
td::actor::send_closure(manager, &StorageManager::set_file_priority_by_name, hash,
std::move(obj.name_), (td::uint8)obj.priority_,
[](td::Result<bool>) {});
}));
}
if (start_download) {
td::actor::send_closure(manager, &StorageManager::set_active_download, hash, true,
[](td::Result<td::Unit>) {});
}
}
get_torrent_info_full_serialized(manager, hash, std::move(promise));
};
}
void run_control_query(ton_api::storage_daemon_setActiveDownload &query, td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(
manager_, &StorageManager::set_active_download, query.hash_, query.active_,
promise.wrap([](td::Unit &&) { return create_serialize_tl_object<ton_api::storage_daemon_success>(); }));
}
void run_control_query(ton_api::storage_daemon_getTorrents &query, td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(
manager_, &StorageManager::get_all_torrents,
[manager = manager_.get(), promise = std::move(promise)](td::Result<std::vector<td::Bits256>> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
return;
}
std::vector<td::Bits256> torrents = R.move_as_ok();
auto result = std::make_shared<std::vector<tl_object_ptr<ton_api::storage_daemon_torrent>>>(torrents.size());
td::MultiPromise mp;
auto ig = mp.init_guard();
for (size_t i = 0; i < torrents.size(); ++i) {
get_torrent_info_short(manager, torrents[i],
[i, result, promise = ig.get_promise()](
td::Result<tl_object_ptr<ton_api::storage_daemon_torrent>> R) mutable {
if (R.is_ok()) {
result->at(i) = R.move_as_ok();
}
promise.set_result(td::Unit());
});
}
ig.add_promise([promise = std::move(promise), result](td::Result<td::Unit> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
return;
}
auto v = std::move(*result);
v.erase(std::remove(v.begin(), v.end(), nullptr), v.end());
promise.set_result(create_serialize_tl_object<ton_api::storage_daemon_torrentList>(std::move(v)));
});
});
}
void run_control_query(ton_api::storage_daemon_getTorrentFull &query, td::Promise<td::BufferSlice> promise) {
get_torrent_info_full_serialized(manager_.get(), query.hash_, std::move(promise));
}
void run_control_query(ton_api::storage_daemon_getTorrentMeta &query, td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(
manager_, &StorageManager::with_torrent, query.hash_,
promise.wrap([](NodeActor::NodeState state) -> td::Result<td::BufferSlice> {
Torrent &torrent = state.torrent;
if (!torrent.inited_info()) {
return td::Status::Error("Torrent meta is not available");
}
std::string meta_str = torrent.get_meta(Torrent::GetMetaOptions().with_proof_depth_limit(10)).serialize();
return create_serialize_tl_object<ton_api::storage_daemon_torrentMeta>(td::BufferSlice(meta_str));
}));
}
void run_control_query(ton_api::storage_daemon_getTorrentPeers &query, td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(manager_, &StorageManager::get_peers_info, query.hash_,
promise.wrap([](tl_object_ptr<ton_api::storage_daemon_peerList> obj) -> td::BufferSlice {
return serialize_tl_object(obj, true);
}));
}
void run_control_query(ton_api::storage_daemon_setFilePriorityAll &query, td::Promise<td::BufferSlice> promise) {
TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe<td::uint8>(query.priority_));
td::actor::send_closure(manager_, &StorageManager::set_all_files_priority, query.hash_, priority,
promise.wrap([](bool done) -> td::Result<td::BufferSlice> {
if (done) {
return create_serialize_tl_object<ton_api::storage_daemon_prioritySet>();
} else {
return create_serialize_tl_object<ton_api::storage_daemon_priorityPending>();
}
}));
}
void run_control_query(ton_api::storage_daemon_setFilePriorityByIdx &query, td::Promise<td::BufferSlice> promise) {
TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe<td::uint8>(query.priority_));
td::actor::send_closure(manager_, &StorageManager::set_file_priority_by_idx, query.hash_, query.idx_, priority,
promise.wrap([](bool done) -> td::Result<td::BufferSlice> {
if (done) {
return create_serialize_tl_object<ton_api::storage_daemon_prioritySet>();
} else {
return create_serialize_tl_object<ton_api::storage_daemon_priorityPending>();
}
}));
}
void run_control_query(ton_api::storage_daemon_setFilePriorityByName &query, td::Promise<td::BufferSlice> promise) {
TRY_RESULT_PROMISE(promise, priority, td::narrow_cast_safe<td::uint8>(query.priority_));
td::actor::send_closure(manager_, &StorageManager::set_file_priority_by_name, query.hash_, std::move(query.name_),
priority, promise.wrap([](bool done) -> td::Result<td::BufferSlice> {
if (done) {
return create_serialize_tl_object<ton_api::storage_daemon_prioritySet>();
} else {
return create_serialize_tl_object<ton_api::storage_daemon_priorityPending>();
}
}));
}
void run_control_query(ton_api::storage_daemon_removeTorrent &query, td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(
manager_, &StorageManager::remove_torrent, query.hash_, query.remove_files_,
promise.wrap([](td::Unit &&) { return create_serialize_tl_object<ton_api::storage_daemon_success>(); }));
}
void run_control_query(ton_api::storage_daemon_loadFrom &query, td::Promise<td::BufferSlice> promise) {
td::optional<TorrentMeta> meta;
if (!query.meta_.empty()) {
TRY_RESULT_PROMISE_ASSIGN(promise, meta, TorrentMeta::deserialize(query.meta_));
}
td::actor::send_closure(
manager_, &StorageManager::load_from, query.hash_, std::move(meta), std::move(query.path_),
[manager = manager_.get(), hash = query.hash_, promise = std::move(promise)](td::Result<td::Unit> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
} else {
get_torrent_info_short(manager, hash, promise.wrap([](tl_object_ptr<ton_api::storage_daemon_torrent> obj) {
return serialize_tl_object(obj, true);
}));
}
});
}
void run_control_query(ton_api::storage_daemon_getNewContractMessage &query, td::Promise<td::BufferSlice> promise) {
td::Promise<std::pair<td::RefInt256, td::uint32>> P =
[promise = std::move(promise), hash = query.hash_, query_id = query.query_id_,
manager = manager_.get()](td::Result<std::pair<td::RefInt256, td::uint32>> R) mutable {
TRY_RESULT_PROMISE(promise, r, std::move(R));
td::actor::send_closure(
manager, &StorageManager::with_torrent, hash,
promise.wrap([r = std::move(r), query_id](NodeActor::NodeState state) -> td::Result<td::BufferSlice> {
Torrent &torrent = state.torrent;
if (!torrent.is_completed()) {
return td::Status::Error("Torrent is not complete");
}
TRY_RESULT(microchunk_tree, MicrochunkTree::Builder::build_for_torrent(torrent, 1LL << 60));
td::Ref<vm::Cell> msg = create_new_contract_message_body(
torrent.get_info().as_cell(), microchunk_tree.get_root_hash(), query_id, r.first, r.second);
return create_serialize_tl_object<ton_api::storage_daemon_newContractMessage>(
vm::std_boc_serialize(msg).move_as_ok(), r.first->to_dec_string(), r.second);
}));
};
ton_api::downcast_call(*query.params_,
td::overloaded(
[&](ton_api::storage_daemon_newContractParams &obj) {
td::RefInt256 rate = td::string_to_int256(obj.rate_);
if (rate.is_null() || rate->sgn() < 0) {
P.set_error(td::Status::Error("Invalid rate"));
return;
}
P.set_result(std::make_pair(std::move(rate), (td::uint32)obj.max_span_));
},
[&](ton_api::storage_daemon_newContractParamsAuto &obj) {
TRY_RESULT_PROMISE(P, address, ContractAddress::parse(obj.provider_address_));
init_tonlib_client();
StorageProvider::get_provider_params(
tonlib_client_.get(), address, P.wrap([](ProviderParams params) {
return std::make_pair(std::move(params.rate_per_mb_day), params.max_span);
}));
}));
}
void run_control_query(ton_api::storage_daemon_importPrivateKey &query, td::Promise<td::BufferSlice> promise) {
auto pk = ton::PrivateKey{query.key_};
td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false,
promise.wrap([hash = pk.compute_short_id()](td::Unit) mutable {
return create_serialize_tl_object<ton_api::storage_daemon_keyHash>(hash.bits256_value());
}));
}
void run_control_query(ton_api::storage_daemon_deployProvider &query, td::Promise<td::BufferSlice> promise) {
if (!enable_storage_provider_) {
promise.set_error(
td::Status::Error("Storage provider is not enabled, run daemon with --storage-provider to enable it"));
return;
}
if (!provider_.empty() || deploying_provider_) {
promise.set_error(td::Status::Error("Storage provider already exists"));
return;
}
TRY_RESULT_PROMISE_ASSIGN(promise, deploying_provider_, generate_fabric_contract(keyring_.get()));
promise.set_result(create_serialize_tl_object<ton_api::storage_daemon_providerAddress>(
deploying_provider_.value().address.to_string()));
do_deploy_provider();
}
void run_control_query(ton_api::storage_daemon_initProvider &query, td::Promise<td::BufferSlice> promise) {
if (!enable_storage_provider_) {
promise.set_error(
td::Status::Error("Storage provider is not enabled, run daemon with --storage-provider to enable it"));
return;
}
if (!provider_.empty() || deploying_provider_) {
promise.set_error(td::Status::Error("Storage provider already exists"));
return;
}
TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.account_address_), "Invalid address: ");
do_init_provider(
address, promise.wrap([](td::Unit) { return create_serialize_tl_object<ton_api::storage_daemon_success>(); }));
}
void do_init_provider(ContractAddress address, td::Promise<td::Unit> promise, bool deploying = false) {
if (deploying && (!deploying_provider_ || deploying_provider_.value().address != address)) {
promise.set_error(td::Status::Error("Deploying was cancelled"));
return;
}
daemon_config_->provider_address_ = address.to_string();
TRY_STATUS_PROMISE(promise, save_daemon_config());
init_tonlib_client();
provider_ = td::actor::create_actor<StorageProvider>("provider", address, db_root_ + "/provider",
tonlib_client_.get(), manager_.get(), keyring_.get());
deploying_provider_ = {};
promise.set_result(td::Unit());
}
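// Deploys the provider contract: while the account is not yet active, (re)send the external message
// with the state init and body, re-checking every 5 seconds until the contract appears on-chain.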
void do_deploy_provider() {
if (!deploying_provider_) {
return;
}
init_tonlib_client();
check_contract_exists(
deploying_provider_.value().address, tonlib_client_.get(),
[SelfId = actor_id(this), client = tonlib_client_.get(),
init = deploying_provider_.value()](td::Result<bool> R) mutable {
if (R.is_error()) {
LOG(INFO) << "Deploying storage contract: " << R.move_as_error();
delay_action([=]() { td::actor::send_closure(SelfId, &StorageDaemon::do_deploy_provider); },
td::Timestamp::in(5.0));
return;
}
if (R.ok()) {
LOG(INFO) << "Deploying storage contract: DONE";
td::actor::send_closure(
SelfId, &StorageDaemon::do_init_provider, init.address, [](td::Result<td::Unit>) {}, true);
return;
}
ContractAddress address = init.address;
td::BufferSlice state_init_boc = vm::std_boc_serialize(init.state_init).move_as_ok();
td::BufferSlice body_boc = vm::std_boc_serialize(init.msg_body).move_as_ok();
auto query = create_tl_object<tonlib_api::raw_createAndSendMessage>(
create_tl_object<tonlib_api::accountAddress>(address.to_string()), state_init_boc.as_slice().str(),
body_boc.as_slice().str());
td::actor::send_closure(
client, &tonlib::TonlibClientWrapper::send_request<tonlib_api::raw_createAndSendMessage>,
std::move(query), [=](td::Result<tl_object_ptr<tonlib_api::ok>> R) {
if (R.is_error()) {
LOG(INFO) << "Deploying storage contract: " << R.move_as_error();
}
delay_action([=]() { td::actor::send_closure(SelfId, &StorageDaemon::do_deploy_provider); },
td::Timestamp::in(5.0));
});
});
}
void run_control_query(ton_api::storage_daemon_removeStorageProvider &query, td::Promise<td::BufferSlice> promise) {
if (!enable_storage_provider_) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
if (provider_.empty() && !deploying_provider_) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
daemon_config_->provider_address_ = "";
TRY_STATUS_PROMISE(promise, save_daemon_config());
deploying_provider_ = {};
provider_ = {};
auto S = td::rmrf(db_root_ + "/provider");
if (S.is_error()) {
LOG(ERROR) << "Failed to delete provider directory: " << S;
}
promise.set_result(create_serialize_tl_object<ton_api::storage_daemon_success>());
}
void run_control_query(ton_api::storage_daemon_getProviderParams &query, td::Promise<td::BufferSlice> promise) {
if (!query.address_.empty()) {
TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: ");
init_tonlib_client();
StorageProvider::get_provider_params(tonlib_client_.get(), address, promise.wrap([](ProviderParams params) {
return serialize_tl_object(params.tl(), true);
}));
return;
}
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
td::actor::send_closure(provider_, &StorageProvider::get_params, promise.wrap([](ProviderParams params) {
return serialize_tl_object(params.tl(), true);
}));
}
void run_control_query(ton_api::storage_daemon_setProviderParams &query, td::Promise<td::BufferSlice> promise) {
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
TRY_RESULT_PROMISE(promise, params, ProviderParams::create(query.params_));
td::actor::send_closure(
provider_, &StorageProvider::set_params, std::move(params),
promise.wrap([](td::Unit) mutable { return create_serialize_tl_object<ton_api::storage_daemon_success>(); }));
}
template <class T>
void run_control_query(T &query, td::Promise<td::BufferSlice> promise) {
promise.set_error(td::Status::Error("unknown query"));
}
void run_control_query(ton_api::storage_daemon_getProviderInfo &query, td::Promise<td::BufferSlice> promise) {
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
td::actor::send_closure(provider_, &StorageProvider::get_provider_info, query.with_balances_, query.with_contracts_,
promise.wrap([](tl_object_ptr<ton_api::storage_daemon_providerInfo> info) {
return serialize_tl_object(info, true);
}));
}
void run_control_query(ton_api::storage_daemon_setProviderConfig &query, td::Promise<td::BufferSlice> promise) {
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
td::actor::send_closure(
provider_, &StorageProvider::set_provider_config, StorageProvider::Config(query.config_),
promise.wrap([](td::Unit) { return create_serialize_tl_object<ton_api::storage_daemon_success>(); }));
}
void run_control_query(ton_api::storage_daemon_withdraw &query, td::Promise<td::BufferSlice> promise) {
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.contract_), "Invalid address: ");
td::actor::send_closure(provider_, &StorageProvider::withdraw, address, promise.wrap([](td::Unit) {
return create_serialize_tl_object<ton_api::storage_daemon_success>();
}));
}
void run_control_query(ton_api::storage_daemon_sendCoins &query, td::Promise<td::BufferSlice> promise) {
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: ");
td::RefInt256 amount = td::string_to_int256(query.amount_);
if (amount.is_null()) {
promise.set_error(td::Status::Error("Invalid amount"));
return;
}
td::actor::send_closure(
provider_, &StorageProvider::send_coins, address, amount, std::move(query.message_),
promise.wrap([](td::Unit) { return create_serialize_tl_object<ton_api::storage_daemon_success>(); }));
}
void run_control_query(ton_api::storage_daemon_closeStorageContract &query, td::Promise<td::BufferSlice> promise) {
if (provider_.empty()) {
promise.set_error(td::Status::Error("No storage provider"));
return;
}
TRY_RESULT_PROMISE_PREFIX(promise, address, ContractAddress::parse(query.address_), "Invalid address: ");
td::actor::send_closure(provider_, &StorageProvider::close_storage_contract, address, promise.wrap([](td::Unit) {
return create_serialize_tl_object<ton_api::storage_daemon_success>();
}));
}
private:
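// Helpers that convert torrent state into storage.daemon.* TL objects for control-interface replies.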
static void fill_torrent_info_short(Torrent &torrent, ton_api::storage_daemon_torrent &obj) {
obj.hash_ = torrent.get_hash();
obj.root_dir_ = torrent.get_root_dir();
if (torrent.inited_info()) {
const Torrent::Info &info = torrent.get_info();
obj.flags_ = 1;
if (torrent.inited_header()) {
obj.flags_ |= 2;
}
obj.total_size_ = info.file_size;
obj.description_ = info.description;
if (torrent.inited_header()) {
obj.included_size_ = torrent.get_included_size();
obj.files_count_ = torrent.get_files_count().unwrap();
obj.dir_name_ = torrent.get_header().dir_name;
}
obj.downloaded_size_ = torrent.get_included_ready_size();
obj.completed_ = torrent.is_completed();
} else {
obj.flags_ = 0;
obj.downloaded_size_ = 0;
obj.completed_ = false;
}
if (torrent.get_fatal_error().is_error()) {
obj.flags_ |= 4;
obj.fatal_error_ = torrent.get_fatal_error().message().str();
}
}
static void fill_torrent_info_full(Torrent &torrent, ton_api::storage_daemon_torrentFull &obj) {
if (!obj.torrent_) {
obj.torrent_ = create_tl_object<ton_api::storage_daemon_torrent>();
}
fill_torrent_info_short(torrent, *obj.torrent_);
obj.files_.clear();
auto count = torrent.get_files_count();
if (!count) {
return;
}
for (size_t i = 0; i < count.value(); ++i) {
auto file = create_tl_object<ton_api::storage_daemon_fileInfo>();
file->name_ = torrent.get_file_name(i).str();
file->size_ = torrent.get_file_size(i);
file->downloaded_size_ = torrent.get_file_ready_size(i);
obj.files_.push_back(std::move(file));
}
}
static void get_torrent_info_short(td::actor::ActorId<StorageManager> manager, td::Bits256 hash,
td::Promise<tl_object_ptr<ton_api::storage_daemon_torrent>> promise) {
td::actor::send_closure(manager, &StorageManager::with_torrent, hash,
[promise = std::move(promise)](td::Result<NodeActor::NodeState> R) mutable {
if (R.is_error()) {
promise.set_result(R.move_as_error());
return;
}
auto state = R.move_as_ok();
auto obj = create_tl_object<ton_api::storage_daemon_torrent>();
fill_torrent_info_short(state.torrent, *obj);
obj->active_download_ = state.active_download;
obj->download_speed_ = state.download_speed;
obj->upload_speed_ = state.upload_speed;
promise.set_result(std::move(obj));
});
}
static void get_torrent_info_full_serialized(td::actor::ActorId<StorageManager> manager, td::Bits256 hash,
td::Promise<td::BufferSlice> promise) {
td::actor::send_closure(manager, &StorageManager::with_torrent, hash,
[promise = std::move(promise)](td::Result<NodeActor::NodeState> R) mutable {
if (R.is_error()) {
promise.set_error(R.move_as_error());
} else {
auto state = R.move_as_ok();
auto obj = create_tl_object<ton_api::storage_daemon_torrentFull>();
fill_torrent_info_full(state.torrent, *obj);
obj->torrent_->active_download_ = state.active_download;
obj->torrent_->download_speed_ = state.download_speed;
obj->torrent_->upload_speed_ = state.upload_speed;
for (size_t i = 0; i < obj->files_.size(); ++i) {
obj->files_[i]->priority_ =
(i < state.file_priority.size() ? state.file_priority[i] : 1);
}
promise.set_result(serialize_tl_object(obj, true));
}
});
}
td::IPAddress ip_addr_;
bool client_mode_;
std::string global_config_;
std::string db_root_;
td::uint16 control_port_;
bool enable_storage_provider_;
tl_object_ptr<ton_api::storage_daemon_config> daemon_config_;
std::shared_ptr<dht::DhtGlobalConfig> dht_config_;
adnl::AdnlNodeIdShort local_id_;
adnl::AdnlNodeIdShort dht_id_;
td::actor::ActorOwn<keyring::Keyring> keyring_;
td::actor::ActorOwn<adnl::AdnlNetworkManager> adnl_network_manager_;
td::actor::ActorOwn<adnl::Adnl> adnl_;
td::actor::ActorOwn<dht::Dht> dht_;
td::actor::ActorOwn<ton_rldp::Rldp> rldp_;
td::actor::ActorOwn<overlay::Overlays> overlays_;
td::actor::ActorOwn<adnl::AdnlExtServer> ext_server_;
td::actor::ActorOwn<StorageManager> manager_;
td::actor::ActorOwn<tonlib::TonlibClientWrapper> tonlib_client_;
td::actor::ActorOwn<StorageProvider> provider_;
td::optional<FabricContractInit> deploying_provider_;
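// Lazily starts the tonlib client used for provider-related requests; it is configured from the
// global config with an in-memory keystore.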
void init_tonlib_client() {
if (!tonlib_client_.empty()) {
return;
}
auto r_conf_data = td::read_file(global_config_);
r_conf_data.ensure();
auto tonlib_options = tonlib_api::make_object<tonlib_api::options>(
tonlib_api::make_object<tonlib_api::config>(r_conf_data.move_as_ok().as_slice().str(), "", false, false),
tonlib_api::make_object<tonlib_api::keyStoreTypeInMemory>());
tonlib_client_ = td::actor::create_actor<tonlib::TonlibClientWrapper>("tonlibclient", std::move(tonlib_options));
}
std::string daemon_config_file() {
return db_root_ + "/config.json";
}
};
int main(int argc, char *argv[]) {
SET_VERBOSITY_LEVEL(verbosity_WARNING);
td::set_default_failure_signal_handler().ensure();
td::unique_ptr<td::LogInterface> logger_;
SCOPE_EXIT {
td::log_interface = td::default_log_interface;
};
td::IPAddress ip_addr;
bool client_mode = false;
std::string global_config, db_root;
td::uint16 control_port = 0;
bool enable_storage_provider = false;
td::OptionParser p;
p.set_description("Server for seeding and downloading bags of files (torrents)\n");
p.add_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) {
int v = VERBOSITY_NAME(FATAL) + (td::to_integer<int>(arg));
SET_VERBOSITY_LEVEL(v);
});
p.add_option('V', "version", "shows storage-daemon build information", [&]() {
std::cout << "storage-daemon build information: [ Commit: " << GitMetadata::CommitSHA1()
<< ", Date: " << GitMetadata::CommitDate() << "]\n";
std::exit(0);
});
p.add_option('h', "help", "prints a help message", [&]() {
char b[10240];
td::StringBuilder sb(td::MutableSlice{b, 10000});
sb << p;
std::cout << sb.as_cslice().c_str();
std::exit(2);
});
p.add_checked_option('I', "ip", "set <ip>:<port> for adnl. :<port> for client mode",
[&](td::Slice arg) -> td::Status {
if (ip_addr.is_valid()) {
return td::Status::Error("Duplicate ip address");
}
if (!arg.empty() && arg[0] == ':') {
TRY_RESULT(port, td::to_integer_safe<td::uint16>(arg.substr(1)));
TRY_STATUS(ip_addr.init_ipv4_port("127.0.0.1", port));
client_mode = true;
} else {
TRY_STATUS(ip_addr.init_host_port(arg.str()));
}
return td::Status::OK();
});
p.add_checked_option('p', "control-port", "port for control interface", [&](td::Slice arg) -> td::Status {
TRY_RESULT_ASSIGN(control_port, td::to_integer_safe<td::uint16>(arg));
return td::Status::OK();
});
p.add_option('C', "global-config", "global TON configuration file",
[&](td::Slice arg) { global_config = arg.str(); });
p.add_option('D', "db", "db root", [&](td::Slice arg) { db_root = arg.str(); });
p.add_option('d', "daemonize", "set SIGHUP", [&]() {
td::set_signal_handler(td::SignalType::HangUp, [](int sig) {
#if TD_DARWIN || TD_LINUX
close(0);
setsid();
#endif
}).ensure();
});
p.add_option('l', "logname", "log to file", [&](td::Slice fname) {
logger_ = td::FileLog::create(fname.str()).move_as_ok();
td::log_interface = logger_.get();
});
p.add_option('P', "storage-provider", "run storage provider", [&]() { enable_storage_provider = true; });
td::actor::Scheduler scheduler({7});
scheduler.run_in_context([&] {
p.run(argc, argv).ensure();
td::actor::create_actor<StorageDaemon>("storage-daemon", ip_addr, client_mode, global_config, db_root, control_port,
enable_storage_provider)
.release();
});
while (scheduler.run(1)) {
}
}


@ -58,7 +58,6 @@
#include "Bitset.h"
#include "PeerState.h"
#include "SharedState.h"
#include "Torrent.h"
#include "TorrentCreator.h"
@ -778,7 +777,7 @@ TEST(Rldp, Main) {
RldpBasicTest::run(Options::create(1, 100 * MegaByte, NetChannel::Options::perfect_net()));
}
/*
TEST(MerkleTree, Manual) {
td::Random::Xorshift128plus rnd(123);
// create big random file
@ -803,7 +802,7 @@ TEST(MerkleTree, Manual) {
timer = {};
LOG(INFO) << "Init merkle tree";
size_t i = 0;
ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Chunk{i++, x}; }));
ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Piece{i++, x}; }));
LOG(INFO) << timer;
auto root_proof = tree.gen_proof(0, chunks_count - 1).move_as_ok();
@ -830,15 +829,15 @@ TEST(MerkleTree, Manual) {
other_new_tree.add_proof(tree.gen_proof(i, i + stride - 1).move_as_ok()).ensure();
other_new_tree.gen_proof(i, i + stride - 1).ensure();
other_new_tree.get_root(2);
std::vector<ton::MerkleTree::Chunk> chunks;
std::vector<ton::MerkleTree::Piece> chunks;
for (size_t j = 0; j < stride && i + j < chunks_count; j++) {
chunks.push_back({i + j, hashes.at(i + j)});
}
new_tree.try_add_chunks(chunks).ensure();
new_tree.try_add_pieces(chunks).ensure();
}
if (stride == 1) {
std::vector<ton::MerkleTree::Chunk> chunks;
std::vector<ton::MerkleTree::Piece> chunks;
for (size_t i = 0; i < chunks_count; i++) {
if (rnd.fast(0, 1) == 1) {
@ -848,7 +847,7 @@ TEST(MerkleTree, Manual) {
}
}
td::Bitset bitmask;
other_new_tree.add_chunks(chunks, bitmask);
other_new_tree.add_pieces(chunks, bitmask);
for (size_t i = 0; i < chunks_count; i++) {
auto expected = chunks[i].hash == hashes[i];
auto got = bitmask.get(i);
@ -874,9 +873,9 @@ TEST(MerkleTree, Stress) {
}
}
size_t i = 0;
ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Chunk{i++, x}; }));
ton::MerkleTree tree(td::transform(hashes, [&i](auto &x) { return ton::MerkleTree::Piece{i++, x}; }));
for (int t2 = 0; t2 < 1000; t2++) {
std::vector<ton::MerkleTree::Chunk> chunks;
std::vector<ton::MerkleTree::Piece> chunks;
int mask = rnd.fast(0, (1 << chunks_count) - 1);
for (size_t i = 0; i < chunks_count; i++) {
@ -889,8 +888,8 @@ TEST(MerkleTree, Stress) {
td::Bitset bitmask_strict;
td::Bitset bitmask;
ton::MerkleTree new_tree(chunks_count, tree.get_root(rnd.fast(1, 5)));
tree.add_chunks(chunks, bitmask_strict);
new_tree.add_chunks(chunks, bitmask);
tree.add_pieces(chunks, bitmask_strict);
new_tree.add_pieces(chunks, bitmask);
for (size_t i = 0; i < chunks_count; i++) {
auto expected = chunks[i].hash == hashes[i];
auto strict_got = bitmask_strict.get(i);
@ -901,7 +900,7 @@ TEST(MerkleTree, Stress) {
}
}
}
};
};*/
struct TorrentMetas {
td::optional<ton::Torrent> torrent;
@ -985,6 +984,7 @@ TEST(Torrent, Meta) {
torrent_file.header = {};
torrent_file.root_proof = {};
auto new_torrent = ton::Torrent::open(options, torrent_file).move_as_ok();
new_torrent.enable_write_to_files();
std::vector<size_t> order;
for (size_t i = 0; i < torrent.get_info().pieces_count(); i++) {
@ -1039,6 +1039,7 @@ TEST(Torrent, OneFile) {
ton::Torrent::Options options;
options.root_dir = "second/";
auto other_torrent = ton::Torrent::open(options, meta).move_as_ok();
other_torrent.enable_write_to_files();
CHECK(!other_torrent.is_completed());
other_torrent.add_piece(0, torrent.get_piece_data(0).move_as_ok(), torrent.get_piece_proof(0).move_as_ok())
.ensure();
@ -1190,17 +1191,12 @@ TEST(Torrent, Peer) {
}
};
class PeerCreator : public ton::NodeActor::Callback {
class PeerCreator : public ton::NodeActor::NodeCallback {
public:
PeerCreator(td::actor::ActorId<PeerManager> peer_manager, ton::PeerId self_id, std::vector<ton::PeerId> peers,
std::shared_ptr<td::Destructor> stop_watcher, std::shared_ptr<td::Destructor> complete_watcher)
: peer_manager_(std::move(peer_manager))
, peers_(std::move(peers))
, self_id_(self_id)
, stop_watcher_(stop_watcher)
, complete_watcher_(complete_watcher) {
PeerCreator(td::actor::ActorId<PeerManager> peer_manager, ton::PeerId self_id, std::vector<ton::PeerId> peers)
: peer_manager_(std::move(peer_manager)), peers_(std::move(peers)), self_id_(self_id) {
}
void get_peers(td::Promise<std::vector<ton::PeerId>> promise) override {
void get_peers(ton::PeerId src, td::Promise<std::vector<ton::PeerId>> promise) override {
auto peers = peers_;
promise.set_value(std::move(peers));
}
@ -1209,7 +1205,7 @@ TEST(Torrent, Peer) {
send_closure(peer_manager_, &PeerManager::register_node, self_id_, self_);
}
td::actor::ActorOwn<ton::PeerActor> create_peer(ton::PeerId self_id, ton::PeerId peer_id,
td::SharedState<ton::PeerState> state) override {
std::shared_ptr<ton::PeerState> state) override {
class PeerCallback : public ton::PeerActor::Callback {
public:
PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId<PeerManager> peer_manager)
@ -1254,6 +1250,19 @@ TEST(Torrent, Peer) {
std::move(state));
}
private:
td::actor::ActorId<PeerManager> peer_manager_;
std::vector<ton::PeerId> peers_;
ton::PeerId self_id_;
td::actor::ActorId<ton::NodeActor> self_;
};
class TorrentCallback : public ton::NodeActor::Callback {
public:
TorrentCallback(std::shared_ptr<td::Destructor> stop_watcher, std::shared_ptr<td::Destructor> complete_watcher)
: stop_watcher_(stop_watcher), complete_watcher_(complete_watcher) {
}
void on_completed() override {
complete_watcher_.reset();
}
@ -1265,12 +1274,8 @@ TEST(Torrent, Peer) {
}
private:
td::actor::ActorId<PeerManager> peer_manager_;
std::vector<ton::PeerId> peers_;
ton::PeerId self_id_;
std::shared_ptr<td::Destructor> stop_watcher_;
std::shared_ptr<td::Destructor> complete_watcher_;
td::actor::ActorId<ton::NodeActor> self_;
};
size_t peers_n = 20;
@ -1314,12 +1319,11 @@ TEST(Torrent, Peer) {
alarm_timestamp() = td::Timestamp::in(1);
}
void alarm() override {
send_closure(node_actor_, &ton::NodeActor::with_torrent, [](td::Result<ton::Torrent *> r_torrent) {
if (r_torrent.is_error()) {
send_closure(node_actor_, &ton::NodeActor::with_torrent, [](td::Result<ton::NodeActor::NodeState> r_state) {
if (r_state.is_error()) {
return;
}
auto torrent = r_torrent.move_as_ok();
print_debug(torrent);
print_debug(&r_state.ok().torrent);
});
alarm_timestamp() = td::Timestamp::in(4);
}
@ -1337,14 +1341,17 @@ TEST(Torrent, Peer) {
auto peer_manager = td::actor::create_actor<PeerManager>("PeerManager");
guard->push_back(td::actor::create_actor<ton::NodeActor>(
"Node#1", 1, std::move(torrent),
td::make_unique<PeerCreator>(peer_manager.get(), 1, gen_peers(1, 2), stop_watcher, complete_watcher)));
td::make_unique<TorrentCallback>(stop_watcher, complete_watcher),
td::make_unique<PeerCreator>(peer_manager.get(), 1, gen_peers(1, 2)), nullptr));
for (size_t i = 2; i <= peers_n; i++) {
ton::Torrent::Options options;
options.in_memory = true;
auto other_torrent = ton::Torrent::open(options, ton::TorrentMeta(info)).move_as_ok();
auto node_actor = td::actor::create_actor<ton::NodeActor>(
PSLICE() << "Node#" << i, i, std::move(other_torrent),
td::make_unique<PeerCreator>(peer_manager.get(), i, gen_peers(i, 2), stop_watcher, complete_watcher));
td::make_unique<TorrentCallback>(stop_watcher, complete_watcher),
td::make_unique<PeerCreator>(peer_manager.get(), i, gen_peers(i, 2)),
nullptr);
if (i == 3) {
td::actor::create_actor<StatsActor>("StatsActor", node_actor.get()).release();


@ -701,8 +701,9 @@ storage.ok = Ok;
storage.state will_upload:Bool want_download:Bool = storage.State;
storage.piece proof:bytes data:bytes = storage.Piece;
storage.torrentInfo data:bytes = storage.TorrentInfo;
storage.updateInit have_pieces:bytes state:storage.State = storage.Update;
storage.updateInit have_pieces:bytes have_pieces_offset:int state:storage.State = storage.Update;
storage.updateHavePieces piece_id:(vector int) = storage.Update;
storage.updateState state:storage.State = storage.Update;
@ -711,10 +712,9 @@ storage.updateState state:storage.State = storage.Update;
storage.ping session_id:long = storage.Pong;
storage.addUpdate session_id:long seqno:int update:storage.Update = Ok;
storage.getTorrentInfo = storage.TorrentInfo;
storage.getPiece piece_id:int = storage.Piece;
storage.queryPrefix id:int256 = Object;
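For illustration, a peer could assemble the extended storage.updateInit (with the new have_pieces_offset field) and wrap it into storage.addUpdate roughly as follows, assuming the usual ton_api code generation for the scheme above; the helper name and include paths are illustrative only, not part of this commit:

#include "auto/tl/ton_api.h"
#include "tl-utils/tl-utils.hpp"

// have_pieces is a bitmask of the pieces this node owns, counted from piece index
// have_pieces_offset; the state carries the will_upload / want_download flags.
td::BufferSlice make_update_init(td::int64 session_id, td::int32 seqno, td::BufferSlice have_pieces,
                                 td::int32 have_pieces_offset, bool will_upload, bool want_download) {
  auto state = ton::create_tl_object<ton::ton_api::storage_state>(will_upload, want_download);
  auto update = ton::create_tl_object<ton::ton_api::storage_updateInit>(std::move(have_pieces), have_pieces_offset,
                                                                        std::move(state));
  return ton::create_serialize_tl_object<ton::ton_api::storage_addUpdate>(session_id, seqno, std::move(update));
}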
---types---
http.header name:string value:string = http.Header;
@ -745,3 +745,116 @@ validatorSession.statsRound timestamp:long producers:(vector validatorSession.st
validatorSession.stats id:tonNode.blockId timestamp:long self:int256 creator:int256 total_validators:int total_weight:long
signatures:int signatures_weight:long approve_signatures:int approve_signatures_weight:long
first_round:int rounds:(vector validatorSession.statsRound) = validatorSession.Stats;
---functions---
---types---
storage.db.key.torrentList = storage.db.key.TorrentList;
storage.db.key.torrent hash:int256 = storage.db.key.TorrentShort;
storage.db.key.torrentMeta hash:int256 = storage.db.key.TorrentMeta;
storage.db.key.priorities hash:int256 = storage.db.key.Priorities;
storage.db.key.piecesInDb hash:int256 = storage.db.key.PiecesInDb;
storage.db.key.pieceInDb hash:int256 idx:long = storage.db.key.PieceInDb;
storage.db.torrentList torrents:(vector int256) = storage.db.TorrentList;
storage.db.torrent root_dir:string active_download:Bool = storage.db.TorrentShort;
storage.db.priorities actions:(vector storage.PriorityAction) = storage.db.Priorities;
storage.db.piecesInDb pieces:(vector long) = storage.db.PiecesInDb;
storage.priorityAction.all priority:int = storage.PriorityAction;
storage.priorityAction.idx idx:long priority:int = storage.PriorityAction;
storage.priorityAction.name name:string priority:int = storage.PriorityAction;
storage.daemon.config server_key:PublicKey cli_key_hash:int256 provider_address:string adnl_id:PublicKey dht_id:PublicKey = storage.daemon.provider.Config;
storage.daemon.provider.params accept_new_contracts:Bool rate_per_mb_day:string max_span:int
minimal_file_size:long maximal_file_size:long = storage.daemon.provider.Params;
storage.provider.db.key.state = storage.provider.db.key.State;
storage.provider.db.key.contractList = storage.provider.db.key.ContractList;
storage.provider.db.key.storageContract wc:int addr:int256 = storage.provider.db.key.StorageContract;
storage.provider.db.key.microchunkTree wc:int addr:int256 = storage.provider.db.key.MicrochunkTree;
storage.provider.db.key.providerConfig = storage.provider.db.key.ProviderConfig;
storage.provider.db.state last_processed_lt:long = storage.provider.db.State;
storage.provider.db.contractAddress wc:int addr:int256 = storage.db.ContractAddress;
storage.provider.db.contractList contracts:(vector storage.provider.db.contractAddress) = storage.db.ContractList;
storage.provider.db.storageContract torrent_hash:int256 microchunk_hash:int256 created_time:int state:int file_size:long
rate:string max_span:int = storage.provider.db.StorageContract;
storage.provider.db.microchunkTree data:bytes = storage.provider.db.MicrochunkTree;
storage.daemon.queryError message:string = storage.daemon.QueryError;
storage.daemon.success = storage.daemon.Success;
storage.daemon.torrent
hash:int256 flags:#
// 0 - info ready
// 1 - header ready
// 2 - fatal error
total_size:flags.0?long description:flags.0?string
files_count:flags.1?long included_size:flags.1?long dir_name:flags.1?string
downloaded_size:long
root_dir:string active_download:Bool completed:Bool
download_speed:double upload_speed:double
fatal_error:flags.2?string
= storage.daemon.Torrent;
storage.daemon.fileInfo
name:string size:long
priority:int
downloaded_size:long
= storage.daemon.FileInfo;
storage.daemon.torrentFull torrent:storage.daemon.torrent files:(vector storage.daemon.fileInfo) = storage.daemon.TorrentFull;
storage.daemon.torrentList torrents:(vector storage.daemon.torrent) = storage.daemon.TorrentList;
storage.daemon.torrentMeta meta:bytes = storage.daemon.TorrentMeta;
storage.daemon.newContractParams rate:string max_span:int = storage.daemon.NewContractParams;
storage.daemon.newContractParamsAuto provider_address:string = storage.daemon.NewContractParams;
storage.daemon.newContractMessage body:bytes rate:string max_span:int = storage.daemon.NewContractMessage;
storage.daemon.peer adnl_id:int256 ip_str:string download_speed:double upload_speed:double ready_parts:long = storage.daemon.Peer;
storage.daemon.peerList peers:(vector storage.daemon.peer) download_speed:double upload_speed:double total_parts:long = storage.daemon.PeerList;
storage.daemon.prioritySet = storage.daemon.SetPriorityStatus;
storage.daemon.priorityPending = storage.daemon.SetPriorityStatus;
storage.daemon.keyHash key_hash:int256 = storage.daemon.KeyHash;
storage.daemon.providerConfig max_contracts:int max_total_size:long = storage.daemon.ProviderConfig;
storage.daemon.contractInfo address:string state:int torrent:int256 created_time:int file_size:long downloaded_size:long
rate:string max_span:int client_balance:string contract_balance:string = storage.daemon.ContractInfo;
storage.daemon.providerInfo address:string balance:string config:storage.daemon.providerConfig
contracts_count:int contracts_total_size:long
contracts:(vector storage.daemon.contractInfo) = storage.daemon.ProviderInfo;
storage.daemon.providerAddress address:string = storage.daemon.ProviderAddress;
---functions---
storage.daemon.setVerbosity verbosity:int = storage.daemon.Success;
storage.daemon.createTorrent path:string description:string = storage.daemon.TorrentFull;
storage.daemon.addByHash hash:int256 root_dir:string start_download:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull;
storage.daemon.addByMeta meta:bytes root_dir:string start_download:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull;
storage.daemon.setActiveDownload hash:int256 active:Bool = storage.daemon.Success;
storage.daemon.getTorrents = storage.daemon.TorrentList;
storage.daemon.getTorrentFull hash:int256 = storage.daemon.TorrentFull;
storage.daemon.getTorrentMeta hash:int256 = storage.daemon.TorrentMeta;
storage.daemon.getNewContractMessage hash:int256 query_id:long params:storage.daemon.NewContractParams = storage.daemon.NewContractMessage;
storage.daemon.getTorrentPeers hash:int256 = storage.daemon.PeerList;
storage.daemon.setFilePriorityAll hash:int256 priority:int = storage.daemon.SetPriorityStatus;
storage.daemon.setFilePriorityByIdx hash:int256 idx:long priority:int = storage.daemon.SetPriorityStatus;
storage.daemon.setFilePriorityByName hash:int256 name:string priority:int = storage.daemon.SetPriorityStatus;
storage.daemon.removeTorrent hash:int256 remove_files:Bool = storage.daemon.Success;
storage.daemon.loadFrom hash:int256 meta:bytes path:string = storage.daemon.Torrent;
storage.daemon.importPrivateKey key:PrivateKey = storage.daemon.KeyHash;
storage.daemon.initProvider account_address:string = storage.daemon.Success;
storage.daemon.deployProvider = storage.daemon.ProviderAddress;
storage.daemon.getProviderParams address:string = storage.daemon.provider.Params;
storage.daemon.setProviderParams params:storage.daemon.provider.params = storage.daemon.Success;
storage.daemon.getProviderInfo with_balances:Bool with_contracts:Bool = storage.daemon.ProviderInfo;
storage.daemon.setProviderConfig config:storage.daemon.providerConfig = storage.daemon.Success;
storage.daemon.withdraw contract:string = storage.daemon.Success;
storage.daemon.sendCoins address:string amount:string message:string = storage.daemon.Success;
storage.daemon.closeStorageContract address:string = storage.daemon.Success;
storage.daemon.removeStorageProvider = storage.daemon.Success;
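The control-interface functions above are plain TL objects, so a client can assemble a query and decode the reply with the generated ton_api classes and the same TL helpers the daemon uses (create_serialize_tl_object / fetch_tl_object). A minimal sketch under those assumptions; the free functions and include paths below are illustrative, not part of this commit:

#include "auto/tl/ton_api.h"
#include "td/utils/Status.h"
#include "tl-utils/tl-utils.hpp"

// Serialize a storage.daemon.getTorrentFull query for the given bag id.
td::BufferSlice make_get_torrent_full_query(td::Bits256 hash) {
  return ton::create_serialize_tl_object<ton::ton_api::storage_daemon_getTorrentFull>(hash);
}

// Decode the answer: a successful reply is storage.daemon.torrentFull; an error reply
// (storage.daemon.queryError) has a different constructor id and is rejected here.
td::Result<size_t> count_files_in_reply(td::BufferSlice reply) {
  TRY_RESULT(full, ton::fetch_tl_object<ton::ton_api::storage_daemon_torrentFull>(reply, true));
  return full->files_.size();
}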

Binary file not shown.


@ -291,7 +291,7 @@ query.estimateFees id:int53 ignore_chksig:Bool = query.Fees;
query.getInfo id:int53 = query.Info;
smc.load account_address:accountAddress = smc.Info;
//smc.forget id:int53 = Ok;
smc.forget id:int53 = Ok;
smc.getCode id:int53 = tvm.Cell;
smc.getData id:int53 = tvm.Cell;
smc.getState id:int53 = tvm.Cell;

Binary file not shown.


@ -17,6 +17,7 @@ set(TONLIB_SOURCE
tonlib/LastConfig.cpp
tonlib/Logging.cpp
tonlib/TonlibClient.cpp
tonlib/TonlibClientWrapper.cpp
tonlib/utils.cpp
tonlib/Client.h
@ -32,6 +33,7 @@ set(TONLIB_SOURCE
tonlib/Logging.h
tonlib/TonlibCallback.h
tonlib/TonlibClient.h
tonlib/TonlibClientWrapper.h
tonlib/utils.h
tonlib/keys/bip39.cpp


@ -2533,7 +2533,7 @@ struct ToRawTransactions {
auto body_hash = body_cell->get_hash().as_slice().str();
td::Ref<vm::Cell> init_state_cell;
auto& init_state_cs = message.init.write();
if (init_state_cs.fetch_ulong(1) == 1) {
if (init_state_cs.fetch_long(1) == 0) {
init_state_cell = vm::CellBuilder().append_cellslice(init_state_cs).finalize();
@ -2542,7 +2542,7 @@ struct ToRawTransactions {
}
}
auto get_data = [body = std::move(body), body_cell = std::move(body_cell),
init_state_cell = std::move(init_state_cell), this](td::Slice salt) mutable {
tonlib_api::object_ptr<tonlib_api::msg_Data> data;
if (try_decode_messages_ && body->size() >= 32 && static_cast<td::uint32>(body->prefetch_long(32)) <= 1) {
@ -2731,7 +2731,7 @@ td::Status TonlibClient::do_request(const tonlib_api::raw_sendMessageReturnHash&
td::Promise<object_ptr<tonlib_api::raw_extMessageInfo>>&& promise) {
TRY_RESULT_PREFIX(body, vm::std_boc_deserialize(request.body_), TonlibError::InvalidBagOfCells("body"));
auto hash = body->get_hash().as_slice().str();
make_request(int_api::SendMessage{std::move(body)},
promise.wrap([hash = std::move(hash)](auto res) {
return tonlib_api::make_object<tonlib_api::raw_extMessageInfo>(std::move(hash));
}));
@ -2859,7 +2859,7 @@ td::Status TonlibClient::do_request(tonlib_api::raw_getTransactionsV2& request,
auto actor_id = actor_id_++;
actors_[actor_id] = td::actor::create_actor<GetTransactionHistory>(
"GetTransactionHistory", client_.get_client(), account_address, lt, hash, count, actor_shared(this, actor_id),
"GetTransactionHistory", client_.get_client(), account_address, lt, hash, count, actor_shared(this, actor_id),
promise.wrap([private_key = std::move(private_key), try_decode_messages = request.try_decode_messages_](auto&& x) mutable {
return ToRawTransactions(std::move(private_key), try_decode_messages).to_raw_transactions(std::move(x));
}));
@ -3710,6 +3710,17 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_load& request,
return td::Status::OK();
}
td::Status TonlibClient::do_request(const tonlib_api::smc_forget& request,
td::Promise<object_ptr<tonlib_api::ok>>&& promise) {
auto it = smcs_.find(request.id_);
if (it == smcs_.end()) {
return TonlibError::InvalidSmcId();
}
smcs_.erase(it);
promise.set_value(tonlib_api::make_object<tonlib_api::ok>());
return td::Status::OK();
}
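Client code can release a cached handle through the new request the same way other tonlib requests are issued in this commit, e.g. via tonlib::TonlibClientWrapper. A hedged sketch; the free function and include paths are assumptions for illustration:

#include "auto/tl/tonlib_api.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"

// Forget a previously loaded contract handle (the id returned by smc.load).
void forget_contract(td::actor::ActorId<tonlib::TonlibClientWrapper> client, td::int64 smc_id) {
  auto query = ton::tonlib_api::make_object<ton::tonlib_api::smc_forget>(smc_id);
  td::actor::send_closure(client, &tonlib::TonlibClientWrapper::send_request<ton::tonlib_api::smc_forget>,
                          std::move(query), [](td::Result<ton::tonlib_api::object_ptr<ton::tonlib_api::ok>> R) {
                            // The typed wrapper already checked the constructor id of the answer.
                            R.ensure();
                          });
}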
td::Status TonlibClient::do_request(const tonlib_api::smc_getCode& request,
td::Promise<object_ptr<tonlib_api::tvm_cell>>&& promise) {
auto it = smcs_.find(request.id_);
@ -3890,7 +3901,7 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_getLibraries& request,
return td::Status::OK();
}
client_.send_query(ton::lite_api::liteServer_getLibraries(std::move(not_cached_hashes)),
promise.wrap([self=this, result_entries = std::move(result_entries)]
(td::Result<ton::lite_api::object_ptr<ton::lite_api::liteServer_libraryResult>> r_libraries) mutable
{
@ -4632,7 +4643,7 @@ td::Status TonlibClient::do_request(const tonlib_api::getConfigParam& request,
std::vector<int32_t> params = { param };
client_.send_query(ton::lite_api::liteServer_getConfigParams(0, std::move(lite_block), std::move(params)),
promise.wrap([block, param](auto r_config) {
auto state = block::check_extract_state_proof(block, r_config->state_proof_.as_slice(),
r_config->config_proof_.as_slice());
if (state.is_error()) {
@ -4716,7 +4727,7 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_lookupBlock& reques
auto to_tonlib_api(const ton::lite_api::liteServer_transactionId& txid)
-> tonlib_api_ptr<tonlib_api::blocks_shortTxId> {
return tonlib_api::make_object<tonlib_api::blocks_shortTxId>(
txid.mode_, txid.account_.as_slice().str(), txid.lt_, txid.hash_.as_slice().str());
}


@ -305,6 +305,7 @@ class TonlibClient : public td::actor::Actor {
td::Result<tonlib_api::object_ptr<tonlib_api::smc_info>> get_smc_info(td::int64 id);
void finish_load_smc(td::unique_ptr<AccountState> query, td::Promise<object_ptr<tonlib_api::smc_info>>&& promise);
td::Status do_request(const tonlib_api::smc_load& request, td::Promise<object_ptr<tonlib_api::smc_info>>&& promise);
td::Status do_request(const tonlib_api::smc_forget& request, td::Promise<object_ptr<tonlib_api::ok>>&& promise);
td::Status do_request(const tonlib_api::smc_getCode& request,
td::Promise<object_ptr<tonlib_api::tvm_cell>>&& promise);
td::Status do_request(const tonlib_api::smc_getData& request,


@ -23,44 +23,40 @@
exception statement from your version. If you delete this exception statement
from all source files in the program, then also delete it here.
*/
#include "TonlibClient.h"
#include "TonlibClientWrapper.h"
TonlibClient::TonlibClient(ton::tl_object_ptr<tonlib_api::options> options) : options_(std::move(options)) {
namespace tonlib {
TonlibClientWrapper::TonlibClientWrapper(ton::tl_object_ptr<tonlib_api::options> options)
: options_(std::move(options)) {
}
void TonlibClient::start_up() {
void TonlibClientWrapper::start_up() {
class Cb : public tonlib::TonlibCallback {
public:
explicit Cb(td::actor::ActorId<TonlibClient> self_id) : self_id_(self_id) {
explicit Cb(td::actor::ActorId<TonlibClientWrapper> self_id) : self_id_(self_id) {
}
void on_result(std::uint64_t id, tonlib_api::object_ptr<tonlib_api::Object> result) override {
td::actor::send_closure(self_id_, &TonlibClient::receive_request_result, id, std::move(result));
td::actor::send_closure(self_id_, &TonlibClientWrapper::receive_request_result, id, std::move(result));
}
void on_error(std::uint64_t id, tonlib_api::object_ptr<tonlib_api::error> error) override {
td::actor::send_closure(self_id_, &TonlibClient::receive_request_result, id,
td::actor::send_closure(self_id_, &TonlibClientWrapper::receive_request_result, id,
td::Status::Error(error->code_, std::move(error->message_)));
}
private:
td::actor::ActorId<TonlibClient> self_id_;
td::actor::ActorId<TonlibClientWrapper> self_id_;
};
tonlib_client_ = td::actor::create_actor<tonlib::TonlibClient>("tonlibclient", td::make_unique<Cb>(actor_id(this)));
auto init = tonlib_api::make_object<tonlib_api::init>(std::move(options_));
auto P = td::PromiseCreator::lambda([](td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) mutable {
R.ensure();
});
auto P = td::PromiseCreator::lambda(
[](td::Result<tonlib_api::object_ptr<tonlib_api::options_info>> R) mutable { R.ensure(); });
send_request(std::move(init), std::move(P));
}
void TonlibClient::send_request(tonlib_api::object_ptr<tonlib_api::Function> obj,
td::Promise<tonlib_api::object_ptr<tonlib_api::Object>> promise) {
auto id = next_request_id_++;
CHECK(requests_.emplace(id, std::move(promise)).second);
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClient::request, id, std::move(obj));
}
void TonlibClient::receive_request_result(td::uint64 id, td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) {
void TonlibClientWrapper::receive_request_result(td::uint64 id,
td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R) {
if (id == 0) {
return;
}
@ -69,4 +65,6 @@ void TonlibClient::receive_request_result(td::uint64 id, td::Result<tonlib_api::
auto promise = std::move(it->second);
requests_.erase(it);
promise.set_result(std::move(R));
}
}
} // namespace tonlib


@ -28,14 +28,26 @@
#include "auto/tl/tonlib_api.hpp"
#include "tonlib/tonlib/TonlibClient.h"
class TonlibClient : public td::actor::Actor {
namespace tonlib {
class TonlibClientWrapper : public td::actor::Actor {
public:
explicit TonlibClient(ton::tl_object_ptr<tonlib_api::options> options);
explicit TonlibClientWrapper(ton::tl_object_ptr<tonlib_api::options> options);
void start_up() override;
void send_request(tonlib_api::object_ptr<tonlib_api::Function> obj,
td::Promise<tonlib_api::object_ptr<tonlib_api::Object>> promise);
template <typename F>
void send_request(tonlib_api::object_ptr<F> obj, td::Promise<typename F::ReturnType> promise) {
auto id = next_request_id_++;
auto P = promise.wrap([](tonlib_api::object_ptr<tonlib_api::Object> x) -> td::Result<typename F::ReturnType> {
if (x->get_id() != F::ReturnType::element_type::ID) {
return td::Status::Error("Invalid response from tonlib");
}
return ton::move_tl_object_as<typename F::ReturnType::element_type>(std::move(x));
});
CHECK(requests_.emplace(id, std::move(P)).second);
td::actor::send_closure(tonlib_client_, &tonlib::TonlibClient::request, id, std::move(obj));
}
private:
void receive_request_result(td::uint64 id, td::Result<tonlib_api::object_ptr<tonlib_api::Object>> R);
@ -45,3 +57,5 @@ class TonlibClient : public td::actor::Actor {
std::map<td::uint64, td::Promise<tonlib_api::object_ptr<tonlib_api::Object>>> requests_;
td::uint64 next_request_id_{1};
};
} // namespace tonlib
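A hedged usage sketch for the templated send_request above: the promise type is deduced from the request's ReturnType, so the caller receives a typed smc.info instead of a generic tonlib_api::Object (the free function and include paths are illustrative assumptions):

#include "auto/tl/tonlib_api.h"
#include "tonlib/tonlib/TonlibClientWrapper.h"

// Load a contract into tonlib's cache and read back the handle id from smc.info.
void load_contract(td::actor::ActorId<tonlib::TonlibClientWrapper> client, std::string address) {
  auto query = ton::tonlib_api::make_object<ton::tonlib_api::smc_load>(
      ton::tonlib_api::make_object<ton::tonlib_api::accountAddress>(std::move(address)));
  td::actor::send_closure(client, &tonlib::TonlibClientWrapper::send_request<ton::tonlib_api::smc_load>,
                          std::move(query),
                          [](td::Result<ton::tonlib_api::object_ptr<ton::tonlib_api::smc_info>> R) {
                            if (R.is_error()) {
                              return;
                            }
                            // R.ok()->id_ is the cached handle that later smc.* requests (including smc.forget) use.
                            auto handle_id = R.ok()->id_;
                            (void)handle_id;
                          });
}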