1
0
Fork 0
mirror of https://github.com/ton-blockchain/ton synced 2025-03-09 15:40:10 +00:00

integrating the existing state of TON Storage / TON Payments / CPS Fift development branches

This commit is contained in:
ton 2020-05-27 22:10:46 +04:00
parent 040df63c98
commit 4e2624459b
153 changed files with 10760 additions and 1695 deletions

69
storage/Bitset.h Normal file
View file

@@ -0,0 +1,69 @@
#pragma once
#include "td/utils/Slice.h"
#include "td/utils/logging.h"
namespace td {
// Growable bit set backed by a std::string, with a cached population count.
struct Bitset {
 public:
  // View of the underlying bytes, trimmed to just the bytes that hold
  // bits_size_ bits (rounded up to a whole byte).
  td::Slice as_slice() const {
    return td::Slice(bits_).substr(0, (bits_size_ + 7) / 8);
  }

  // Returns the bit at |offset|; bits beyond the allocated storage read as 0.
  bool get(size_t offset) const {
    auto byte_pos = offset / 8;
    if (byte_pos >= bits_.size()) {
      return false;
    }
    auto mask = 1 << (offset % 8);
    return (bits_[byte_pos] & mask) != 0;
  }

  // Ensures the byte containing bit |offset| exists (new bytes are zeroed).
  void reserve(size_t offset) {
    auto byte_pos = offset / 8;
    if (byte_pos >= bits_.size()) {
      bits_.resize(byte_pos + 1);
    }
  }

  // Sets the bit at |offset|; returns true iff it was previously unset.
  bool set_one(size_t offset) {
    auto byte_pos = offset / 8;
    auto mask = 1 << (offset % 8);
    bits_size_ = std::max(bits_size_, offset + 1);
    if (byte_pos >= bits_.size()) {
      // Grow geometrically so repeated set_one stays amortized O(1).
      bits_.resize(std::max(byte_pos + 1, bits_.size() * 2));
    }
    if ((bits_[byte_pos] & mask) != 0) {
      return false;
    }
    bits_[byte_pos] |= mask;
    count_++;
    return true;
  }

  // Number of bits currently set.
  size_t ones_count() const {
    return count_;
  }

  // Replaces the storage with |bits| and recomputes count_ and bits_size_.
  void set_raw(std::string bits) {
    bits_ = std::move(bits);
    bits_size_ = 0;
    count_ = 0;
    auto total = size();
    for (size_t pos = 0; pos < total; pos++) {
      if (!get(pos)) {
        continue;
      }
      count_++;
      bits_size_ = pos + 1;
    }
  }

  // Capacity in bits (whole bytes * 8).
  size_t size() const {
    return bits_.size() * 8;
  }

 private:
  std::string bits_;     // packed bits, LSB-first within each byte
  size_t bits_size_{0};  // index of the highest bit ever set, plus one
  size_t count_{0};      // cached number of set bits
};
} // namespace td

55
storage/CMakeLists.txt Normal file
View file

@@ -0,0 +1,55 @@
cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR)

# ton_crypto (linked below) needs OpenSSL; locate it unless a parent scope already did.
if (NOT OPENSSL_FOUND)
find_package(OpenSSL REQUIRED)
endif()

# Sources and headers of the storage library proper.
set(STORAGE_SOURCE
LoadSpeed.cpp
MerkleTree.cpp
NodeActor.cpp
PeerActor.cpp
PeerState.cpp
Torrent.cpp
TorrentCreator.cpp
TorrentHeader.cpp
TorrentInfo.cpp
TorrentMeta.cpp
Bitset.h
LoadSpeed.h
MerkleTree.h
NodeActor.h
PartsHelper.h
PeerActor.h
PeerState.h
SharedState.h
Torrent.h
TorrentCreator.h
TorrentHeader.h
TorrentInfo.h
TorrentMeta.h
)

# Interactive command-line client for the storage library.
set(STORAGE_CLI_SOURCE
storage-cli.cpp
)

add_library(storage ${STORAGE_SOURCE})
target_link_libraries(storage tdutils tdactor tddb ton_crypto tl_api ${JEMALLOC_LIBRARIES})
# Let dependents include storage headers by their plain names.
target_include_directories(storage PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
)

add_executable(storage-cli ${STORAGE_CLI_SOURCE})
target_link_libraries(storage-cli storage overlay tdutils tdactor adnl tl_api dht
rldp rldp2 catchain validatorsession full-node validator ton_validator validator
fift-lib memprof terminal ${JEMALLOC_LIBRARIES})

# Exported to the parent scope so the top-level test target can compile it.
set(STORAGE_TEST_SOURCE
${CMAKE_CURRENT_SOURCE_DIR}/test/storage.cpp
PARENT_SCOPE
)

# Do not install it yet
#install(TARGETS storage-cli RUNTIME DESTINATION bin)

33
storage/LoadSpeed.cpp Normal file
View file

@@ -0,0 +1,33 @@
#include "LoadSpeed.h"
#include "td/utils/format.h"
namespace ton {

// Records a transfer of |size| bytes observed at time |now|.
void LoadSpeed::add(td::size_t size, td::Timestamp now) {
  total_size_ += size;
  events_.push(Event{size, now});
  update(now);
}

// Average transfer speed (bytes per second) over the sliding window.
double LoadSpeed::speed(td::Timestamp now) const {
  update(now);
  return total_size_ / duration();
}

td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed) {
  return sb << td::format::as_size(static_cast<td::uint64>(speed.speed())) << "/s";
}

// Drops the oldest events while the window spans more than 60 seconds.
// NOTE(review): |now| is unused - the window is measured between the oldest
// and the newest *recorded* events, so the estimate does not decay while no
// new traffic arrives; confirm this is intended.
void LoadSpeed::update(td::Timestamp now) const {
  while (duration() > 60) {
    total_size_ -= events_.front().size;
    events_.pop();
  }
}

// Window length in seconds; at least 5 to damp the estimate when only a few
// events have been recorded (also avoids division by zero in speed()).
double LoadSpeed::duration() const {
  double res = 5;
  if (events_.size() > 1) {
    res = std::max(res, events_.back().at.at() - events_.front().at.at());
  }
  return res;
}
} // namespace ton

25
storage/LoadSpeed.h Normal file
View file

@@ -0,0 +1,25 @@
#pragma once
#include "td/utils/StringBuilder.h"
#include "td/utils/Time.h"
#include "td/utils/VectorQueue.h"
namespace ton {
// Sliding-window estimator of transfer speed (bytes per second).
class LoadSpeed {
 public:
  // Record |size| bytes transferred at time |now|.
  void add(td::size_t size, td::Timestamp now);
  // Average speed over the current window.
  double speed(td::Timestamp now = td::Timestamp::now()) const;
  friend td::StringBuilder &operator<<(td::StringBuilder &sb, const LoadSpeed &speed);

 private:
  struct Event {
    td::size_t size;
    td::Timestamp at;
  };
  // mutable: speed() is const but prunes expired events via update().
  mutable td::VectorQueue<Event> events_;
  mutable td::size_t total_size_{0};
  double duration() const;
  void update(td::Timestamp now) const;
};
} // namespace ton

342
storage/MerkleTree.cpp Normal file
View file

@@ -0,0 +1,342 @@
#include "MerkleTree.h"
#include "common/bitstring.h"
#include "td/utils/UInt.h"
#include "vm/cells/CellSlice.h"
#include "vm/cells/MerkleProof.h"
#include "vm/cellslice.h"
#include "vm/excno.hpp"
namespace ton {
// Strips the outer MerkleProof special cell and returns the proof root it wraps.
static td::Ref<vm::Cell> unpack_proof(td::Ref<vm::Cell> root) {
  vm::CellSlice slice(vm::NoVm(), root);
  CHECK(slice.special_type() == vm::Cell::SpecialType::MerkleProof);
  return slice.fetch_ref();
}
td::uint32 MerkleTree::get_depth() const {
  return log_n_;
}

// Returns the current root proof, pruned below |depth_limit| levels.
td::Ref<vm::Cell> MerkleTree::get_root(size_t depth_limit) const {
  if (depth_limit > log_n_ || root_proof_.is_null()) {
    return root_proof_;
  }
  // Virtualize the stored proof, walk it down to |depth_limit| while recording
  // the visited cells in a usage tree, then generate a smaller proof covering
  // exactly the visited part.
  auto usage_tree = std::make_shared<vm::CellUsageTree>();
  auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
  auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
  do_gen_proof(std::move(usage_cell), unpack_proof(root_proof_), depth_limit);
  auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
  CHECK(res.not_null());
  return res;
}

// Loads every cell of |node| down to |depth_limit| so it ends up in the
// generated proof. |node_raw| mirrors |node| without virtualization and is
// used to detect pruned branches without touching them.
void MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) const {
  if (depth_limit == 0) {
    return;
  }
  // check if it is possible to load node without breaking virtualization
  vm::CellSlice cs_raw(vm::NoVm(), std::move(node_raw));
  if (cs_raw.is_special()) {
    return;
  }
  vm::CellSlice cs(vm::NoVm(), std::move(node));
  while (cs.have_refs()) {
    do_gen_proof(cs.fetch_ref(), cs_raw.fetch_ref(), depth_limit - 1);
  }
}

td::Bits256 MerkleTree::get_root_hash() const {
  CHECK(root_hash_);
  return root_hash_.value();
}
// Creates a tree with a known root hash but no chunk data yet.
MerkleTree::MerkleTree(size_t chunks_count, td::Bits256 root_hash) {
  init_begin(chunks_count);
  root_hash_ = root_hash;
  init_finish();
}

// Creates a tree from an already obtained root proof cell.
MerkleTree::MerkleTree(size_t chunks_count, td::Ref<vm::Cell> root_proof) {
  init_begin(chunks_count);
  root_hash_ = unpack_proof(root_proof)->get_hash(0).as_array();
  root_proof_ = std::move(root_proof);
  init_finish();
}

// Creates a complete tree from the full, ordered list of chunk hashes.
MerkleTree::MerkleTree(td::Span<Chunk> chunks) {
  init_begin(chunks.size());
  for (size_t i = 0; i < chunks.size(); i++) {
    CHECK(chunks[i].index == i);
    init_add_chunk(i, chunks[i].hash.as_slice());
  }
  init_finish();
}

// Allocates the implicit binary tree: n_ is chunks_count rounded up to a power
// of two; proof_ is a heap-style array where node i has children 2*i and
// 2*i+1 and the leaves live at indices [n_, 2*n_).
void MerkleTree::init_begin(size_t chunks_count) {
  log_n_ = 0;
  while ((size_t(1) << log_n_) < chunks_count) {
    log_n_++;
  }
  n_ = size_t(1) << log_n_;
  total_blocks_ = chunks_count;
  mark_.resize(n_ * 2);
  proof_.resize(n_ * 2);
  // Pad the leaves beyond chunks_count with one shared all-zero hash cell.
  td::UInt256 null{};
  auto cell = vm::CellBuilder().store_bytes(null.as_slice()).finalize();
  for (auto i = chunks_count; i < n_; i++) {
    proof_[i + n_] = cell;
  }
}

// Stores the hash of chunk |index| in its leaf; each leaf may be set once.
void MerkleTree::init_add_chunk(size_t index, td::Slice hash) {
  CHECK(index < total_blocks_);
  CHECK(proof_[index + n_].is_null());
  proof_[index + n_] = vm::CellBuilder().store_bytes(hash).finalize();
}

// Builds the inner nodes bottom-up from whatever leaves are present.
void MerkleTree::init_finish() {
  for (size_t i = n_ - 1; i >= 1; i--) {
    auto j = i * 2;
    if (proof_[j].is_null()) {
      continue;
    }
    // Reuse the right sibling's cell when both nodes have hash-identical
    // children (common when the torrent consists of repeated chunks).
    if (i + 1 < n_ && proof_[i + 1].not_null() && proof_[j]->get_hash() == proof_[j + 2]->get_hash() &&
        proof_[j + 1]->get_hash() == proof_[j + 3]->get_hash()) {
      // minor optimization for same chunks
      proof_[i] = proof_[i + 1];
    } else {
      proof_[i] = vm::CellBuilder().store_ref(proof_[j]).store_ref(proof_[j + 1]).finalize();
    }
  }
  if (proof_[1].not_null()) {
    // All leaves were provided: derive root_hash_ and root_proof_.
    init_proof();
  }
  CHECK(root_hash_);
}
// Drops chunk |index| and invalidates every inner node on its path to the root.
void MerkleTree::remove_chunk(td::size_t index) {
  CHECK(index < n_);
  index += n_;
  while (proof_[index].not_null()) {
    proof_[index] = {};
    index /= 2;
  }
}

bool MerkleTree::has_chunk(td::size_t index) const {
  CHECK(index < n_);
  index += n_;
  return proof_[index].not_null();
}

// Inserts the (not yet trusted) hash of chunk |index| and rebuilds every inner
// node whose two children are now both present. All touched nodes are stamped
// with the current mark_id_ so a later merge()/cleanup_add() pass can tell
// freshly added nodes from previously verified ones.
void MerkleTree::add_chunk(td::size_t index, td::Slice hash) {
  CHECK(hash.size() == 32);
  CHECK(index < n_);
  index += n_;
  auto cell = vm::CellBuilder().store_bytes(hash).finalize();
  CHECK(proof_[index].is_null());
  proof_[index] = std::move(cell);
  mark_[index] = mark_id_;
  for (index /= 2; index != 0; index /= 2) {
    CHECK(proof_[index].is_null());
    auto &left = proof_[index * 2];
    auto &right = proof_[index * 2 + 1];
    if (left.not_null() && right.not_null()) {
      proof_[index] = vm::CellBuilder().store_ref(left).store_ref(right).finalize();
      mark_[index] = mark_id_;
    }
  }
}
// Recursively checks the shape of a proof subtree at the given |depth|:
// pruned branches may appear anywhere; leaves (depth 0) carry exactly 256
// data bits and no refs; every inner node has exactly two refs and no bits.
static td::Status do_validate(td::Ref<vm::Cell> ref, size_t depth) {
  vm::CellSlice cs(vm::NoVm(), std::move(ref));
  if (cs.is_special()) {
    if (cs.special_type() != vm::Cell::SpecialType::PrunnedBranch) {
      return td::Status::Error("Unexpected special cell");
    }
    return td::Status::OK();
  }
  if (depth == 0) {
    if (cs.size() != 256) {
      return td::Status::Error("List in proof must have 256 bits");
    }
    if (cs.size_refs() != 0) {
      return td::Status::Error("List in proof must have zero refs");
    }
  } else {
    if (cs.size() != 0) {
      return td::Status::Error("Node in proof must have zero bits");
    }
    if (cs.size_refs() != 2) {
      return td::Status::Error("Node in proof must have two refs");
    }
    TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1));
    TRY_STATUS(do_validate(cs.fetch_ref(), depth - 1));
  }
  return td::Status::OK();
}
// Validates the structure of an externally received proof:
//  1. depth <= log_n
//  2. each non special node has two refs and nothing else
//  3. each list contains only hash
//  4. all special nodes are merkle proofs
td::Status MerkleTree::validate_proof(td::Ref<vm::Cell> new_root) {
  vm::CellSlice cs(vm::NoVm(), new_root);
  if (cs.special_type() != vm::Cell::SpecialType::MerkleProof) {
    // Fixed typo in the error message ("mekle" -> "merkle").
    return td::Status::Error("Proof must be a merkle proof cell");
  }
  auto root = cs.fetch_ref();
  // If the expected root hash is already known, the proof must commit to it.
  if (root_hash_ && root->get_hash(0).as_slice() != root_hash_.value().as_slice()) {
    return td::Status::Error("Proof has invalid root hash");
  }
  return do_validate(std::move(root), log_n_);
}
// Merges an externally received (and validated) proof into the locally known
// root proof, widening the set of verifiable chunks.
td::Status MerkleTree::add_proof(td::Ref<vm::Cell> new_root) {
  CHECK(root_proof_.not_null() || root_hash_);
  TRY_STATUS(validate_proof(new_root));
  if (root_proof_.not_null()) {
    auto combined = vm::MerkleProof::combine_fast(root_proof_, std::move(new_root));
    if (combined.is_null()) {
      return td::Status::Error("Can't combine proofs");
    }
    root_proof_ = std::move(combined);
  } else {
    // First proof we ever received: adopt it as-is.
    root_proof_ = std::move(new_root);
  }
  return td::Status::OK();
}

// Compares |chunk|'s hash against the leaf already stored in the tree.
td::Status MerkleTree::validate_existing_chunk(const Chunk &chunk) {
  vm::CellSlice cs(vm::NoVm(), proof_[chunk.index + n_]);
  CHECK(cs.size() == chunk.hash.size());
  if (cs.as_bitslice().compare(chunk.hash.cbits()) != 0) {
    return td::Status::Error("Hash mismatch");
  }
  return td::Status::OK();
}

// Adds all |chunks| at once; fails if any one of them cannot be verified.
td::Status MerkleTree::try_add_chunks(td::Span<Chunk> chunks) {
  td::Bitset bitmask;
  add_chunks(chunks, bitmask);
  for (size_t i = 0; i < chunks.size(); i++) {
    if (!bitmask.get(i)) {
      return td::Status::Error(PSLICE() << "Invalid chunk #" << chunks[i].index);
    }
  }
  return td::Status::OK();
}
// Adds |chunks| optimistically, then reconciles the result against the trusted
// root proof; bit i of |bitmask| is set iff chunks[i] ended up verified.
void MerkleTree::add_chunks(td::Span<Chunk> chunks, td::Bitset &bitmask) {
  if (root_proof_.is_null()) {
    return;
  }
  // New mark generation: everything stamped with this mark_id_ was added by
  // the current call and may still be rolled back by merge()/cleanup_add().
  mark_id_++;
  bitmask.reserve(chunks.size());
  for (size_t i = 0; i < chunks.size(); i++) {
    const auto &chunk = chunks[i];
    if (has_chunk(chunk.index)) {
      // Leaf already known: just compare hashes.
      if (validate_existing_chunk(chunk).is_ok()) {
        bitmask.set_one(i);
      }
      continue;
    }
    add_chunk(chunk.index, chunk.hash.as_slice());
  }
  // Fold the optimistically built nodes into the trusted proof; subtrees whose
  // hashes disagree with the proof are dropped inside merge().
  root_proof_ = vm::CellBuilder::create_merkle_proof(merge(unpack_proof(root_proof_), 1));
  for (size_t i = 0; i < chunks.size(); i++) {
    const auto &chunk = chunks[i];
    // A freshly added chunk is verified iff its leaf survived the merge.
    if (has_chunk(chunk.index) && mark_[chunk.index + n_] == mark_id_) {
      bitmask.set_one(i);
    }
  }
}
// Reconciles the locally built subtree at heap index |index| with the trusted
// (virtualized) proof node |root|. Returns the cell that should represent this
// position; locally built nodes whose hash contradicts the proof are dropped.
td::Ref<vm::Cell> MerkleTree::merge(td::Ref<vm::Cell> root, size_t index) {
  const auto &down = proof_[index];
  if (down.not_null()) {
    if (down->get_hash() != root->get_hash(0)) {
      // Local data contradicts the trusted proof - discard it.
      proof_[index] = {};
    } else {
      return down;
    }
  }
  // Nothing newly added below this node, or we reached a leaf: keep the proof's cell.
  if (mark_[index] != mark_id_ || index >= n_) {
    return root;
  }
  vm::CellSlice cs(vm::NoVm(), root);
  if (cs.is_special()) {
    // The proof is pruned here, so freshly added nodes cannot be verified:
    // roll them back.
    cleanup_add(index);
    return root;
  }
  CHECK(cs.size_refs() == 2);
  vm::CellBuilder cb;
  cb.store_bits(cs.fetch_bits(cs.size()));
  auto left = merge(cs.fetch_ref(), index * 2);
  auto right = merge(cs.fetch_ref(), index * 2 + 1);
  cb.store_ref(std::move(left)).store_ref(std::move(right));
  return cb.finalize();
}

// Rolls back every node in this subtree that was added under the current mark_id_.
void MerkleTree::cleanup_add(size_t index) {
  if (mark_[index] != mark_id_) {
    return;
  }
  proof_[index] = {};
  if (index >= n_) {
    return;
  }
  cleanup_add(index * 2);
  cleanup_add(index * 2 + 1);
}
// Derives root_hash_ and root_proof_ from a fully built tree (proof_[1]).
void MerkleTree::init_proof() {
  CHECK(proof_[1].not_null());
  td::Bits256 new_root_hash = proof_[1]->get_hash(0).as_array();
  // If a root hash was supplied up front, the built tree must match it.
  CHECK(!root_hash_ || root_hash_.value() == new_root_hash);
  root_hash_ = new_root_hash;
  root_proof_ = vm::CellBuilder::create_merkle_proof(proof_[1]);
}
// Generates a merkle proof covering chunks [l, r] inclusive.
td::Result<td::Ref<vm::Cell>> MerkleTree::gen_proof(size_t l, size_t r) {
  if (root_proof_.is_null()) {
    return td::Status::Error("got no proofs yet");
  }
  // Walk the virtualized proof, recording the visited cells in a usage tree,
  // then build a new proof from exactly those cells. TRY_VM maps vm exceptions
  // (e.g. touching a pruned cell) to a Status.
  auto usage_tree = std::make_shared<vm::CellUsageTree>();
  auto root_raw = vm::MerkleProof::virtualize(root_proof_, 1);
  auto usage_cell = vm::UsageCell::create(root_raw, usage_tree->root_ptr());
  TRY_STATUS(TRY_VM(do_gen_proof(std::move(usage_cell), 0, n_ - 1, l, r)));
  auto res = vm::MerkleProof::generate(root_raw, usage_tree.get());
  CHECK(res.not_null());
  return res;
}

// Visits the subtree spanning chunk interval [il, ir]:
//  - disjoint from [l, r]: left unloaded, so it is pruned to a single hash;
//  - fully inside [l, r]: also left unloaded - the pruned branch keeps its
//    hash, which is what the receiver needs to verify those chunks;
//  - partial overlap: load this node and recurse into both children.
td::Status MerkleTree::do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) const {
  if (ir < l || il > r) {
    return td::Status::OK();
  }
  if (l <= il && ir <= r) {
    return td::Status::OK();
  }
  vm::CellSlice cs(vm::NoVm(), std::move(node));
  if (cs.is_special()) {
    // The needed subtree is pruned in our own proof - cannot serve the range.
    return td::Status::Error("Can't generate a proof");
  }
  CHECK(cs.size_refs() == 2);
  auto ic = (il + ir) / 2;
  TRY_STATUS(do_gen_proof(cs.fetch_ref(), il, ic, l, r));
  TRY_STATUS(do_gen_proof(cs.fetch_ref(), ic + 1, ir, l, r));
  return td::Status::OK();
}
} // namespace ton

77
storage/MerkleTree.h Normal file
View file

@@ -0,0 +1,77 @@
#pragma once
#include "td/utils/optional.h"
#include "td/utils/Slice.h"
#include "vm/cells.h"
#include "Bitset.h"
namespace ton {
// merkle_node$_ {n:#} left:^(ton::MerkleTree n) right:^(ton::MerkleTree n) = ton::MerkleTree (n + 1);
// merkle_leaf$_ hash:bits256 = ton::MerkleTree 0;
// Incrementally verifiable merkle tree over torrent chunk hashes.
// Leaves hold chunk hashes; the tree is padded to a power-of-two leaf count.
class MerkleTree {
 public:
  td::uint32 get_depth() const;
  // Root proof, optionally pruned below |depth_limit| levels.
  td::Ref<vm::Cell> get_root(size_t depth_limit = std::numeric_limits<size_t>::max()) const;
  td::Bits256 get_root_hash() const;
  MerkleTree(size_t chunks_count, td::Bits256 root_hash);
  MerkleTree(size_t chunks_count, td::Ref<vm::Cell> root_proof);
  struct Chunk {
    td::size_t index{0};
    td::Bits256 hash;
  };
  explicit MerkleTree(td::Span<Chunk> chunks);
  MerkleTree() = default;
  // Three-phase initialization used by the chunk-list constructor.
  void init_begin(size_t chunks_count);
  void init_add_chunk(td::size_t index, td::Slice hash);
  void init_finish();
  // merge external proof with an existing proof
  td::Status add_proof(td::Ref<vm::Cell> new_root);
  // generate proof for all chunks from l to r inclusive
  td::Result<td::Ref<vm::Cell>> gen_proof(size_t l, size_t r);
  // Trying to add and validate list of chunks simultaneously
  td::Status try_add_chunks(td::Span<Chunk> chunks);
  // Returns bitmask of successfully added chunks
  // Intended to be used during validation of a torrent.
  // We got arbitrary chunks read from disk, and we got an arbitrary proof.
  // Now we can say about some chunks that they are correct. This is a general way
  // to do this.
  //
  // NB: already added chunks are simply validated. One should be careful
  // not to process them twice
  void add_chunks(td::Span<Chunk> chunks, td::Bitset &bitmask);

 private:
  td::uint64 total_blocks_;  // number of real chunks (leaves)
  td::size_t n_;  // n = 2^log_n
  td::uint32 log_n_;
  // Generation counter: nodes stamped with the current mark_id_ were added by
  // the latest add_chunks() pass and may still be rolled back.
  td::size_t mark_id_{0};
  std::vector<td::size_t> mark_;  // n_ * 2
  // Heap-style layout: node i has children 2*i and 2*i+1; leaves at [n_, 2*n_).
  std::vector<td::Ref<vm::Cell>> proof_;  // n_ * 2
  td::optional<td::Bits256> root_hash_;
  td::Ref<vm::Cell> root_proof_;
  td::Status validate_proof(td::Ref<vm::Cell> new_root);
  bool has_chunk(td::size_t index) const;
  void remove_chunk(td::size_t index);
  void add_chunk(td::size_t index, td::Slice hash);
  void init_proof();
  td::Ref<vm::Cell> merge(td::Ref<vm::Cell> root, size_t index);
  void cleanup_add(size_t index);
  td::Status do_gen_proof(td::Ref<vm::Cell> node, size_t il, size_t ir, size_t l, size_t r) const;
  void do_gen_proof(td::Ref<vm::Cell> node, td::Ref<vm::Cell> node_raw, size_t depth_limit) const;
  td::Status validate_existing_chunk(const Chunk &chunk);
};
} // namespace ton

379
storage/NodeActor.cpp Normal file
View file

@@ -0,0 +1,379 @@
#include "NodeActor.h"
#include "vm/boc.h"
#include "td/utils/Enumerator.h"
#include "td/utils/tests.h"
namespace ton {
// Constructs the node for a single torrent.
// |self_id|         - our own peer id (excluded from downloaded peer lists).
// |torrent|         - torrent state; ownership is taken.
// |callback|        - embedder hooks (peer creation, peer discovery, events).
// |should_download| - whether to actively request missing parts.
NodeActor::NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr<Callback> callback, bool should_download)
    : self_id_(self_id)
    , torrent_(std::move(torrent))
    , callback_(std::move(callback))
    , should_download_(should_download)
    // Fix: read pieces_count() from torrent_, not from the |torrent| parameter,
    // which is moved-from at this point. Members are initialized in declaration
    // order and torrent_ is declared before parts_helper_, so torrent_ is
    // already valid here.
    , parts_helper_(torrent_.get_info().pieces_count()) {
}
// Ensures a PeerActor for |peer_id| exists and hands back its actor id.
void NodeActor::start_peer(PeerId peer_id, td::Promise<td::actor::ActorId<PeerActor>> promise) {
  peers_[peer_id];  // create the slot; loop() will spawn the actor
  loop();
  auto it = peers_.find(peer_id);
  if (it == peers_.end() || it->second.actor.empty()) {
    promise.set_error(td::Status::Error("Won't start peer now"));
    return;
  }
  promise.set_value(it->second.actor.get());
}

// Called by a peer's Notifier whenever the shared PeerState changed.
void NodeActor::on_signal_from_peer(PeerId peer_id) {
  loop_peer(peer_id, peers_[peer_id]);
}
// Actor initialization: registers with the embedder and seeds local part state.
void NodeActor::start_up() {
  callback_->register_self(actor_id(this));
  auto pieces_count = torrent_.get_info().pieces_count();
  parts_.parts.resize(pieces_count);
  // Header pieces get maximum priority (255): the rest of the torrent cannot
  // be interpreted until the header is available.
  auto header = torrent_.get_header_parts_range();
  for (td::uint32 i = static_cast<td::uint32>(header.begin); i < header.end; i++) {
    parts_helper_.set_part_priority(i, 255);
  }
  // Import the pieces already present on disk.
  for (td::uint32 i = 0; i < pieces_count; i++) {
    if (torrent_.is_piece_ready(i)) {
      parts_helper_.on_self_part_ready(i);
      parts_.parts[i].ready = true;
    }
  }
  loop();
}
// Choke logic, refreshed every 5 seconds: decide which peers we are willing
// to upload to. Peers are ranked by (wants to download from us, we want to
// download from them, their download speed); we keep the 4 best plus one
// randomly chosen peer (optimistic unchoke).
void NodeActor::loop_will_upload() {
  if (peers_.empty()) {
    return;
  }
  if (!will_upload_at_.is_in_past()) {
    alarm_timestamp().relax(will_upload_at_);
    return;
  }
  will_upload_at_ = td::Timestamp::in(5);
  alarm_timestamp().relax(will_upload_at_);
  // Tuple sorts ascending, so negations/negative speed put the best peers first.
  std::vector<std::tuple<bool, bool, double, PeerId>> peers;
  for (auto &it : peers_) {
    auto state = it.second.state.lock();
    bool needed = false;
    if (state->peer_state_) {
      needed = state->peer_state_.value().want_download;
    }
    peers.emplace_back(!needed, !state->node_state_.want_download, -state->download.speed(), it.first);
  }
  std::sort(peers.begin(), peers.end());
  if (peers.size() > 5) {
    // Replace the 5th slot with a random non-top peer, then keep the top 5.
    std::swap(peers[4], peers[td::Random::fast(5, (int)peers.size() - 1)]);
    peers.resize(5);
  }
  std::set<PeerId> peers_set;
  for (auto id : peers) {
    peers_set.insert(std::get<PeerId>(id));
  }
  // Publish the decision to each peer's shared state.
  for (auto &it : peers_) {
    auto will_upload = peers_set.count(it.first) > 0;
    auto state = it.second.state.lock();
    if (state->node_state_.will_upload != will_upload) {
      state->node_state_.will_upload = will_upload;
      state->notify_peer();
    }
  }
}
// Main work dispatcher; idempotent and cheap to call repeatedly.
void NodeActor::loop() {
  loop_get_peers();
  loop_start_stop_peers();
  loop_queries();
  loop_will_upload();
  // Broadcast any newly completed parts to every peer.
  if (!ready_parts_.empty()) {
    for (auto &it : peers_) {
      auto state = it.second.state.lock();
      state->node_ready_parts_.insert(state->node_ready_parts_.end(), ready_parts_.begin(), ready_parts_.end());
      state->notify_peer();
    }
    ready_parts_.clear();
  }
  // Fire on_completed() exactly once.
  if (torrent_.is_completed() && !is_completed_) {
    is_completed_ = true;
    callback_->on_completed();
  }
}
// Human-readable stats: node totals, one line per peer, then per-file progress.
std::string NodeActor::get_stats_str() {
  td::StringBuilder sb;
  sb << "Node " << self_id_ << " " << torrent_.get_ready_parts_count() << "\t" << download_;
  sb << "\toutq " << parts_.total_queries;
  sb << "\n";
  for (auto &it : peers_) {
    auto state = it.second.state.lock();
    sb << "\tPeer " << it.first;
    sb << "\t" << parts_helper_.get_ready_parts(it.second.peer_token).ones_count();
    sb << "\t" << state->download;
    if (state->peer_state_) {
      auto &peer_state = state->peer_state_.value();
      sb << "\t up:" << peer_state.will_upload;
      sb << "\tdown:" << peer_state.want_download;
      sb << "\tcnt:" << parts_helper_.get_want_download_count(it.second.peer_token);
    }
    sb << "\toutq:" << state->node_queries_.size();
    sb << "\tinq:" << state->peer_queries_.size();
    auto &node_state = state->node_state_;
    sb << "\tNup:" << node_state.will_upload;
    sb << "\tNdown:" << node_state.want_download;
    sb << "\n";
  }
  auto o_n = torrent_.get_files_count();
  if (o_n) {
    // by default all parts priority == 1
    auto n = o_n.unwrap();
    file_priority_.resize(n, 1);
    for (size_t i = 0; i < n; i++) {
      auto size = torrent_.get_file_size(i);
      auto ready_size = torrent_.get_file_ready_size(i);
      // Fix: guard against zero-length files (integer division by zero) and
      // print a single '%' - td::StringBuilder is not printf-style, so the
      // old "%%" was emitted verbatim.
      auto progress = size == 0 ? 100 : 100 * ready_size / size;
      sb << "#" << i << " " << torrent_.get_file_name(i) << "\t" << progress << "% "
         << td::format::as_size(ready_size) << "/" << td::format::as_size(size) << "\t priority=" << file_priority_[i]
         << "\n";
    }
  }
  return sb.as_cslice().str();
}
// Sets the download priority of file |i| (255 = highest).
// Passing an out-of-range |i| applies |priority| to ALL files and parts.
void NodeActor::set_file_priority(size_t i, td::uint8 priority) {
  auto o_files_count = torrent_.get_files_count();
  if (!o_files_count) {
    // Torrent header not available yet - the file list is unknown.
    return;
  }
  auto files_count = o_files_count.unwrap();
  if (file_priority_.size() != files_count) {
    // by default all parts priority == 1
    file_priority_.resize(files_count, 1);
  }
  if (i >= files_count) {
    // Out-of-range index: apply to every part and every file.
    for (td::uint32 part_i = 0; part_i < torrent_.get_info().pieces_count(); part_i++) {
      parts_helper_.set_part_priority(part_i, priority);
    }
    for (auto &p : file_priority_) {
      p = priority;
    }
    return;
  }
  if (file_priority_[i] == priority) {
    return;
  }
  file_priority_[i] = priority;
  auto range = torrent_.get_file_parts_range(i);
  td::uint32 begin = static_cast<td::uint32>(range.begin);
  td::uint32 end = static_cast<td::uint32>(range.end);
  // NOTE(review): the loop variable below shadows the |i| parameter.
  for (td::uint32 i = begin; i < end; i++) {
    if (i == begin || i + 1 == end) {
      // Boundary pieces may be shared with neighbouring files: use the max
      // priority over all chunks touching this piece. chunk_id 0 appears to
      // denote the torrent header (forced to 255) - confirm against Torrent.
      auto chunks = torrent_.chunks_by_piece(i);
      td::uint8 max_priority = 0;
      for (auto chunk_id : chunks) {
        if (chunk_id == 0) {
          max_priority = 255;
        } else {
          max_priority = td::max(max_priority, file_priority_[chunk_id - 1]);
        }
      }
      parts_helper_.set_part_priority(i, max_priority);
    } else {
      parts_helper_.set_part_priority(i, priority);
    }
  }
  yield();
}
// Toggles whether this node actively requests missing parts.
void NodeActor::set_should_download(bool should_download) {
  should_download_ = should_download;
  yield();
}

// Hands the torrent state back to the owner on actor shutdown.
void NodeActor::tear_down() {
  callback_->on_closed(std::move(torrent_));
}
// Spawns a Notifier and a PeerActor for every known peer that lacks one.
void NodeActor::loop_start_stop_peers() {
  for (auto &it : peers_) {
    auto &peer = it.second;
    auto peer_id = it.first;
    if (peer.notifier.empty()) {
      peer.notifier = td::actor::create_actor<Notifier>("Notifier", actor_id(this), peer_id);
    }
    if (peer.actor.empty()) {
      LOG(ERROR) << "Init Peer " << self_id_ << " -> " << peer_id;
      auto state = peer.state.lock();
      state->node = peer.notifier.get();
      // Seed the shared state with all parts we already have.
      for (td::uint32 i = 0; i < parts_.parts.size(); i++) {
        if (parts_.parts[i].ready) {
          state->node_ready_parts_.push_back(i);
        }
      }
      peer.peer_token = parts_helper_.register_peer(peer_id);
      peer.actor = callback_->create_peer(self_id_, peer_id, peer.state);
    }
  }
}
// Schedules download queries for the rarest missing parts, respecting the
// per-peer (MAX_PEER_TOTAL_QUERIES) and global (MAX_TOTAL_QUERIES) limits.
void NodeActor::loop_queries() {
  if (!should_download_) {
    return;
  }
  for (auto &it : peers_) {
    auto peer_token = it.second.peer_token;
    auto state = it.second.state.lock();
    if (!state->peer_state_) {
      // Haven't heard from this peer yet.
      parts_helper_.set_peer_limit(peer_token, 0);
      continue;
    }
    if (!state->peer_state_.value().will_upload) {
      // Peer is not willing to upload to us (we are choked).
      parts_helper_.set_peer_limit(peer_token, 0);
      continue;
    }
    parts_helper_.set_peer_limit(peer_token,
                                 td::narrow_cast<td::uint32>(MAX_PEER_TOTAL_QUERIES - state->node_queries_.size()));
  }
  auto parts = parts_helper_.get_rarest_parts(MAX_TOTAL_QUERIES);
  for (auto &part : parts) {
    auto it = peers_.find(part.peer_id);
    CHECK(it != peers_.end());
    auto state = it->second.state.lock();
    CHECK(state->peer_state_);
    CHECK(state->peer_state_.value().will_upload);
    CHECK(state->node_queries_.size() < MAX_PEER_TOTAL_QUERIES);
    auto part_id = part.part_id;
    // Inserting an empty entry marks the query as pending.
    state->node_queries_[static_cast<td::uint32>(part_id)];
    parts_helper_.lock_part(part_id);
    parts_.total_queries++;
    parts_.parts[part_id].query_to_peer = part.peer_id;
    state->notify_peer();
  }
}
// Periodically asks the embedder for a fresh peer list.
void NodeActor::loop_get_peers() {
  if (has_get_peers_) {
    return;  // a request is already in flight
  }
  if (next_get_peers_at_.is_in_past()) {
    callback_->get_peers(promise_send_closure(td::actor::actor_id(this), &NodeActor::got_peers));
    has_get_peers_ = true;
    return;
  }
  alarm_timestamp().relax(next_get_peers_at_);
}

// Response handler for loop_get_peers(): registers new peers and reschedules.
void NodeActor::got_peers(td::Result<std::vector<PeerId>> r_peers) {
  if (r_peers.is_error()) {
    next_get_peers_at_ = td::Timestamp::in(GET_PEER_RETRY_TIMEOUT);
  } else {
    auto peers = r_peers.move_as_ok();
    for (auto &peer : peers) {
      if (peer == self_id_) {
        continue;  // never peer with ourselves
      }
      peers_[peer];  // creates the slot; loop() spawns the actor
    }
    next_get_peers_at_ = td::Timestamp::in(GET_PEER_EACH);
  }
  has_get_peers_ = false;
  loop();
}
// Pumps one peer's shared state: absorbs its ready-part announcements,
// answers its piece queries, and applies the results of our own queries.
void NodeActor::loop_peer(const PeerId &peer_id, Peer &peer) {
  auto state = peer.state.lock();
  CHECK(!state->peer.empty());
  for (auto part_id : state->peer_ready_parts_) {
    parts_helper_.on_peer_part_ready(peer.peer_token, part_id);
  }
  state->peer_ready_parts_.clear();
  // Answer queries from peer
  bool should_notify_peer = false;
  auto want_download = parts_helper_.get_want_download_count(peer.peer_token) > 0;
  if (state->node_state_.want_download != want_download) {
    state->node_state_.want_download = want_download;
    should_notify_peer = true;
  }
  for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) {
    if (it->second) {
      it++;  // already answered
    } else {
      should_notify_peer = true;
      // Build the response: serialized piece proof plus the raw piece data
      // (or an error if we are currently not uploading to this peer).
      it->second = [&]() -> td::Result<PeerState::Part> {
        if (!state->node_state_.will_upload) {
          return td::Status::Error("Won't upload");
        }
        TRY_RESULT(proof, torrent_.get_piece_proof(it->first));
        TRY_RESULT(data, torrent_.get_piece_data(it->first));
        PeerState::Part res;
        TRY_RESULT(proof_serialized, vm::std_boc_serialize(std::move(proof)));
        res.proof = std::move(proof_serialized);
        res.data = td::BufferSlice(std::move(data));
        return std::move(res);
      }();
    }
  }
  // Handle results from peer
  for (auto it = state->node_queries_.begin(); it != state->node_queries_.end();) {
    if (it->second) {
      auto part_id = it->first;
      // Deserialize the proof and let the torrent verify and store the piece.
      auto r_unit = it->second.unwrap().move_fmap([&](PeerState::Part part) -> td::Result<td::Unit> {
        TRY_RESULT(proof, vm::std_boc_deserialize(part.proof));
        TRY_STATUS(torrent_.add_piece(part_id, part.data.as_slice(), std::move(proof)));
        download_.add(part.data.size(), td::Timestamp::now());
        return td::Unit();
      });
      // The query is finished either way: release its bookkeeping.
      parts_.parts[part_id].query_to_peer = {};
      parts_.total_queries--;
      it = state->node_queries_.erase(it);
      parts_helper_.unlock_part(part_id);
      if (r_unit.is_ok()) {
        on_part_ready(part_id);
      } else {
        //LOG(ERROR) << "Failed " << part_id;
      }
    } else {
      it++;  // still pending
    }
  }
  if (should_notify_peer) {
    state->notify_peer();
  }
  yield();
}
// Records a newly verified part and wakes every peer so they learn about it.
void NodeActor::on_part_ready(PartId part_id) {
  parts_helper_.on_self_part_ready(part_id);
  CHECK(!parts_.parts[part_id].ready);
  parts_.parts[part_id].ready = true;
  for (auto &peer : peers_) {
    // TODO: notify only peer want_download_count == 0
    peer.second.state.unsafe()->notify_node();
  }
  // Queued here; loop() pushes it into each peer's shared state.
  ready_parts_.push_back(part_id);
}
} // namespace ton

127
storage/NodeActor.h Normal file
View file

@@ -0,0 +1,127 @@
#pragma once
#include "LoadSpeed.h"
#include "PartsHelper.h"
#include "PeerActor.h"
#include "Torrent.h"
#include "td/utils/Random.h"
#include <map>
namespace ton {
// Coordinates downloading/uploading of a single torrent: owns the Torrent
// state, spawns one PeerActor per peer, and schedules piece queries.
class NodeActor : public td::actor::Actor {
 public:
  // Integration points supplied by the embedder.
  class Callback {
   public:
    virtual ~Callback() {
    }
    virtual td::actor::ActorOwn<PeerActor> create_peer(PeerId self_id, PeerId peer_id,
                                                       td::SharedState<PeerState> state) = 0;
    virtual void get_peers(td::Promise<std::vector<PeerId>> peers) = 0;
    virtual void register_self(td::actor::ActorId<ton::NodeActor> self) = 0;
    //TODO: proper callbacks
    virtual void on_completed() = 0;
    virtual void on_closed(ton::Torrent torrent) = 0;
  };
  NodeActor(PeerId self_id, ton::Torrent torrent, td::unique_ptr<Callback> callback, bool should_download = true);
  void start_peer(PeerId peer_id, td::Promise<td::actor::ActorId<PeerActor>> promise);
  ton::Torrent *with_torrent() {
    return &torrent_;
  }
  std::string get_stats_str();
  void set_file_priority(size_t i, td::uint8 priority);
  void set_should_download(bool should_download);

 private:
  PeerId self_id_;
  ton::Torrent torrent_;
  // Per-file priorities; lazily sized once the torrent header is known.
  std::vector<td::uint8> file_priority_;
  td::unique_ptr<Callback> callback_;
  bool should_download_{false};
  // Forwards wake-ups from a peer's shared state back to this NodeActor.
  class Notifier : public td::actor::Actor {
   public:
    Notifier(td::actor::ActorId<NodeActor> node, PeerId peer_id) : node_(std::move(node)), peer_id_(peer_id) {
    }
    void wake_up() override {
      send_closure(node_, &NodeActor::on_signal_from_peer, peer_id_);
    }

   private:
    td::actor::ActorId<NodeActor> node_;
    PeerId peer_id_;
  };
  // Everything tracked per connected peer.
  struct Peer {
    td::actor::ActorOwn<PeerActor> actor;
    td::actor::ActorOwn<Notifier> notifier;
    td::SharedState<PeerState> state;
    PartsHelper::PeerToken peer_token;
  };
  std::map<PeerId, Peer> peers_;
  struct QueryId {
    PeerId peer;
    PartId part;
    auto key() const {
      return std::tie(peer, part);
    }
    bool operator<(const QueryId &other) const {
      return key() < other.key();
    }
  };
  // In-flight query bookkeeping per part.
  struct PartsSet {
    struct Info {
      td::optional<PeerId> query_to_peer;
      bool ready{false};
    };
    size_t total_queries{0};
    std::vector<Info> parts;
  };
  PartsSet parts_;
  PartsHelper parts_helper_;
  // Parts completed since the last loop(); flushed to all peers there.
  std::vector<PartId> ready_parts_;
  LoadSpeed download_;
  td::Timestamp next_get_peers_at_;
  bool has_get_peers_{false};
  static constexpr double GET_PEER_RETRY_TIMEOUT = 5;
  static constexpr double GET_PEER_EACH = 5;
  bool is_completed_{false};
  // Next time the choke decision (loop_will_upload) is recomputed.
  td::Timestamp will_upload_at_;
  void on_signal_from_peer(PeerId peer_id);
  void start_up() override;
  void loop() override;
  void tear_down() override;
  void loop_start_stop_peers();
  static constexpr size_t MAX_TOTAL_QUERIES = 20;
  static constexpr size_t MAX_PEER_TOTAL_QUERIES = 5;
  void loop_queries();
  bool try_send_query();
  bool try_send_part(PartId part_id);
  void loop_get_peers();
  void got_peers(td::Result<std::vector<PeerId>> r_peers);
  void loop_peer(const PeerId &peer_id, Peer &peer);
  void on_part_ready(PartId part_id);
  void loop_will_upload();
};
} // namespace ton

259
storage/PartsHelper.h Normal file
View file

@@ -0,0 +1,259 @@
#include "PeerState.h"
#include "Bitset.h"
#include "td/utils/Random.h"
#include "td/utils/Status.h"
namespace ton {
struct PartsHelper {
public:
// Token 0 is reserved for the node itself ("self"); peer tokens come after.
PartsHelper(size_t parts_count) : parts_(parts_count), peers_(64) {
  peers_[0].is_valid = true;
}
using PartId = size_t;
using PeerToken = size_t;

// Token of the local node.
PeerToken register_self() {
  return self_token_;
}

// Returns a stable token for |peer_id|, registering the peer on first use.
// Tokens freed by forget_peer() are reused before new ones are allocated.
PeerToken register_peer(PeerId peer_id) {
  PeerToken new_peer_token{free_peer_tokens_.empty() ? next_peer_token_ : free_peer_tokens_.back()};
  auto it = peer_id_to_token_.emplace(peer_id, new_peer_token);
  if (it.second) {
    // Newly inserted mapping: actually claim the token.
    if (free_peer_tokens_.empty()) {
      next_peer_token_++;
      peers_.resize(next_peer_token_);
    } else {
      free_peer_tokens_.pop_back();
    }
    auto peer = get_peer(new_peer_token, true);
    peer->is_valid = true;
    peer->peer_id = peer_id;
    peer->want_download_count = 0;
  }
  return it.first->second;
}
// Releases |peer_token| for reuse and clears the peer's state.
void forget_peer(PeerToken peer_token) {
  CHECK(peer_token != self_token_);
  free_peer_tokens_.push_back(peer_token);
  auto *peer = get_peer(peer_token);
  peer_id_to_token_.erase(get_peer(peer_token)->peer_id);
  *peer = Peer{};
  peer->rnd = td::Random::fast_uint32();
}

// Maximum number of parts that may be scheduled from this peer at once.
void set_peer_limit(PeerToken peer, td::uint32 limit) {
  get_peer(peer)->limit = limit;
}

// Records that |peer_token| has part |part_id| and updates rarity bookkeeping.
void on_peer_part_ready(PeerToken peer_token, PartId part_id) {
  auto peer = get_peer(peer_token);
  if (!peer->ready_parts.set_one(part_id)) {
    return;  // already knew about it
  }
  auto part = get_part(part_id);
  if (part->is_ready) {
    return;  // we already have this part ourselves
  }
  peer->want_download_count++;
  if (!part->rnd) {
    // Lazy random tie-breaker so equally rare parts are picked in random order.
    part->rnd = td::Random::fast_uint32();
  }
  change_key(part_id, part->rnd, part->peers_count, part->peers_count + 1, part->priority, part->priority);
  part->peers_count++;
}

// A part is locked while a download query for it is in flight.
void lock_part(PartId part_id) {
  auto *part = get_part(part_id);
  CHECK(!part->is_locked);
  part->is_locked = true;
}
void unlock_part(PartId part_id) {
  auto *part = get_part(part_id);
  CHECK(part->is_locked);
  part->is_locked = false;
}
void set_part_priority(PartId part_id, td::uint8 priority) {
  auto *part = get_part(part_id);
  if (part->is_ready) {
    return;  // priority no longer matters for parts we already have
  }
  change_key(part_id, part->rnd, part->peers_count, part->peers_count, part->priority, priority);
  part->priority = priority;
}
td::uint8 get_part_priority(PartId part_id) {
  auto *part = get_part(part_id);
  return part->priority;
}

// Records that we now have |part_id| ourselves: removes it from every peer's
// want-download accounting and from the rarity ordering (count -> 0).
void on_self_part_ready(PartId part_id) {
  auto peer = get_peer(self_token_);
  if (!peer->ready_parts.set_one(part_id)) {
    return;
  }
  auto part = get_part(part_id);
  CHECK(!part->is_ready);
  part->is_ready = true;
  for (auto &peer : peers_) {
    if (peer.ready_parts.get(part_id)) {
      peer.want_download_count--;
    }
  }
  change_key(part_id, part->rnd, part->peers_count, 0, part->priority, part->priority);
}
struct RarePart {
PartId part_id;
PeerId peer_id;
};
// Picks up to `max_count` (part, peer) pairs to download next, preferring
// high-priority and rare parts. Implemented as a k-way merge over each peer's
// `rarest_parts` ordering, honoring every peer's per-peer `limit`.
std::vector<RarePart> get_rarest_parts(size_t max_count) {
  // Cursor into one peer's ordered rarest_parts set, plus remaining quota.
  struct It {
    std::set<Peer::Key>::iterator begin, end;
    PeerId peer_id;
    td::uint32 limit{0};
    bool empty() const {
      return begin == end || limit == 0;
    }
    auto key() const {
      // Order cursors by their current best Key; peer_id breaks ties so
      // distinct cursors never compare equal inside std::set<It>.
      return std::tie(*begin, peer_id);
    }
    bool operator<(const It &other) const {
      return key() < other.key();
    }
  };
  std::set<It> its;
  for (auto &peer : peers_) {
    if (!peer.is_valid || peer.limit == 0) {
      continue;
    }
    It it;
    it.begin = peer.rarest_parts.begin();
    it.end = peer.rarest_parts.end();
    it.peer_id = peer.peer_id;
    it.limit = peer.limit;
    if (it.empty()) {
      continue;
    }
    its.insert(it);
  }
  std::vector<RarePart> res;
  while (res.size() < max_count && !its.empty()) {
    // Pop the cursor with the globally best (rarest / highest priority) key.
    auto it = *its.begin();
    its.erase(its.begin());
    auto part_id = it.begin->part_id;
    // Skip duplicates (same part offered by several peers arrives in a run)
    // and parts already locked by an in-flight query.
    if ((res.empty() || res.back().part_id != part_id) && !get_part(part_id)->is_locked) {
      res.push_back({part_id, it.peer_id});
      CHECK(get_peer(register_peer(it.peer_id))->ready_parts.get(part_id));
      it.limit--;
    }
    it.begin++;
    // Re-insert the advanced cursor unless it is exhausted or out of quota.
    if (it.empty()) {
      continue;
    }
    its.insert(it);
  }
  return res;
}
// Number of parts this peer has that we still want (peer may be uninited).
td::uint32 get_want_download_count(PeerToken peer_token) {
  auto *slot = get_peer(peer_token, false);
  return slot->want_download_count;
}
// Bitset of parts this peer is known to have.
const td::Bitset &get_ready_parts(PeerToken peer_token) {
  auto *slot = get_peer(peer_token, false);
  return slot->ready_parts;
}
private:
PeerToken self_token_{0};
size_t parts_count_;
// Per-part bookkeeping.
struct Part {
  bool is_locked{false};  // an in-flight query owns this part
  bool is_ready{false};   // we have and validated this part
  td::uint8 priority{1};
  td::uint32 rnd{0};          // random salt for ordering; 0 = not yet assigned
  td::uint32 peers_count{0};  // invalid for ready parts
};
// Per-peer bookkeeping, addressed by PeerToken (index into peers_).
struct Peer {
  PeerId peer_id{0};
  bool is_valid{false};
  td::uint32 rnd{0};    // per-peer salt mixed into part keys
  td::uint32 limit{0};  // max parts to schedule from this peer at once
  td::Bitset ready_parts;
  // sum_i (peer.ready_parts[i] && !node.ready_parts[i])
  td::uint32 want_download_count{0};
  // peers_count - count of peers which has this part
  // key_count = !is_ready * peers_count;
  // Sort key for rarest-first selection: higher priority first (255 - p),
  // then fewer owners, then randomized tie-break.
  struct Key {
    td::uint8 priority{0};
    td::uint32 count{0};
    PartId part_id{0};
    td::uint32 rnd{0};
    auto key() const {
      return std::make_tuple(255 - priority, count, rnd, part_id);
    }
    bool operator<(const Key &other) const {
      return key() < other.key();
    }
  };
  std::set<Key> rarest_parts;  // TODO: use vector instead of set
};
std::vector<Part> parts_;
std::vector<Peer> peers_;
td::uint32 next_peer_token_{1};
std::map<PeerId, PeerToken> peer_id_to_token_;
std::vector<PeerToken> free_peer_tokens_;
// Bounds-checked access to a part's bookkeeping record.
Part *get_part(PartId part_id) {
  CHECK(part_id < parts_.size());
  return &parts_[part_id];
}
// Bounds-checked access to a peer slot. Unless `can_be_uninited` is set,
// the slot must already hold a registered (valid) peer.
Peer *get_peer(PeerToken peer_token, bool can_be_uninited = false) {
  CHECK(peer_token < peers_.size());
  auto *slot = &peers_[peer_token];
  CHECK(can_be_uninited || slot->is_valid);
  return slot;
}
// Re-keys `part_id` in every interested peer's rarest_parts ordering after
// its (owner count, priority) changed. A zero count or zero priority on
// either side means "not present in the ordering" (erase-only / insert-only).
void change_key(PartId part_id, td::uint32 rnd, td::uint32 from_count, td::uint32 to_count, td::uint8 from_priority,
                td::uint8 to_priority) {
  if (from_count == 0 && to_count == 0) {
    // Was absent and stays absent everywhere.
    return;
  }
  if (from_count == to_count && from_priority == to_priority) {
    // Key unchanged.
    return;
  }
  for (auto &peer : peers_) {
    if (!peer.is_valid) {
      continue;
    }
    if (peer.peer_id == 0) {
      continue;
    }
    if (!peer.ready_parts.get(part_id)) {
      // Only peers that actually offer the part keep it in their ordering.
      continue;
    }
    //TODO: xor is not a perfect solution as it keeps a lot order between part_ids
    Peer::Key key;
    key.part_id = part_id;
    key.rnd = rnd ^ peer.rnd;
    if (from_count != 0 && from_priority != 0) {
      key.count = from_count;
      key.priority = from_priority;
      peer.rarest_parts.erase(key);
    }
    if (to_count != 0 && to_priority != 0) {
      key.count = to_count;
      key.priority = to_priority;
      peer.rarest_parts.insert(key);
    }
  }
}
};
} // namespace ton

400
storage/PeerActor.cpp Normal file
View file

@ -0,0 +1,400 @@
#include "PeerActor.h"
#include "auto/tl/ton_api.hpp"
#include "tl-utils/tl-utils.hpp"
#include "td/utils/overloaded.h"
#include "td/utils/Random.h"
namespace ton {
// Converts a TL-serialized storage_state into the internal State struct.
PeerState::State from_ton_api(const ton::ton_api::storage_state &state) {
  PeerState::State result;
  result.want_download = state.want_download_;
  result.will_upload = state.will_upload_;
  return result;
}
// Converts the internal State struct back into its TL object form.
ton::ton_api::object_ptr<ton::ton_api::storage_state> to_ton_api(const PeerState::State &state) {
  auto will_upload = state.will_upload;
  auto want_download = state.want_download;
  return ton::ton_api::make_object<ton::ton_api::storage_state>(will_upload, want_download);
}
// The callback is the transport used to send queries; `state` is the shared
// mailbox between this PeerActor and its NodeActor.
PeerActor::PeerActor(td::unique_ptr<Callback> callback, td::SharedState<PeerState> state)
    : callback_(std::move(callback)), state_(std::move(state)) {
  CHECK(callback_);
}
// Serializes a TL function object of type T and sends it; returns the query id.
template <class T, class... ArgsT>
td::uint64 PeerActor::create_and_send_query(ArgsT &&... args) {
  auto serialized = ton::create_serialize_tl_object<T>(std::forward<ArgsT>(args)...);
  return send_query(std::move(serialized));
}
// Hands a serialized query to the transport under a fresh query id; the
// answer will come back through on_query_result() with the same id.
td::uint64 PeerActor::send_query(td::BufferSlice query) {
  const auto query_id = next_query_id_++;
  callback_->send_query(query_id, std::move(query));
  return query_id;
}
// Requests another loop() iteration on the actor scheduler.
void PeerActor::schedule_loop() {
  yield();
}
// Defers waking the NodeActor until loop_notify_node() runs.
void PeerActor::notify_node() {
  need_notify_node_ = true;
}
// Entry point for queries arriving from the remote peer: parses the TL
// function and dispatches to the matching execute_* handler.
void PeerActor::execute_query(td::BufferSlice query, td::Promise<td::BufferSlice> promise) {
  TRY_RESULT_PROMISE(promise, f, ton::fetch_tl_object<ton::ton_api::Function>(std::move(query), true));
  ton::ton_api::downcast_call(
      *f, td::overloaded(
              [&](ton::ton_api::storage_ping &ping) {
                execute_ping(static_cast<td::uint64>(ping.session_id_), std::move(promise));
              },
              [&](ton::ton_api::storage_addUpdate &add_update) { execute_add_update(add_update, std::move(promise)); },
              [&](ton::ton_api::storage_getPiece &get_piece) { execute_get_piece(get_piece, std::move(promise)); },
              [&](auto &other) { promise.set_error(td::Status::Error("Unknown function")); }));
  // Handlers may have changed shared state; run loop() again.
  schedule_loop();
}
// Completion of an outgoing ping; any successful answer counts as a pong.
void PeerActor::on_ping_result(td::Result<td::BufferSlice> r_answer) {
  ping_query_id_ = {};
  if (r_answer.is_error()) {
    return;
  }
  on_pong();
}
// Any successful answer proves liveness: extend the online window and
// let the node know the peer is reachable.
void PeerActor::on_pong() {
  wait_pong_till_ = td::Timestamp::in(4);
  auto guard = state_.lock();
  guard->peer_online_ = true;
  notify_node();
}
// Completion of an addUpdate query. On success the peer is considered
// initialized and the pending have-pieces batch has been delivered.
void PeerActor::on_update_result(td::Result<td::BufferSlice> r_answer) {
  update_query_id_ = {};
  if (r_answer.is_error()) {
    return;
  }
  peer_is_inited_ = true;
  have_pieces_list_.clear();
}
// Completion of an outgoing getPiece query: stores the (possibly failed)
// result into the shared node_queries_ map and wakes the node.
void PeerActor::on_get_piece_result(PartId piece_id, td::Result<td::BufferSlice> r_answer) {
  auto state = state_.lock();
  auto it = state->node_queries_.find(piece_id);
  if (it == state->node_queries_.end()) {
    // The node no longer wants this piece; drop the answer.
    LOG(ERROR) << "???";
    return;
  }
  //TODO: handle errors ???
  it->second = [&]() -> td::Result<PeerState::Part> {
    TRY_RESULT(slice, std::move(r_answer));
    TRY_RESULT(piece, ton::fetch_result<ton::ton_api::storage_getPiece>(slice.as_slice()));
    PeerState::Part res;
    res.data = std::move(piece->data_);
    res.proof = std::move(piece->proof_);
    return std::move(res);
  }();
  notify_node();
}
// Completion of an updateState query; on failure clear the query id so
// loop_update_state() retries with the current state.
void PeerActor::on_update_state_result(td::Result<td::BufferSlice> r_answer) {
  if (!r_answer.is_ok()) {
    update_state_query_.query_id = {};
  }
}
// Routes a finished outgoing query to its handler by matching query_id
// against the ids of all in-flight queries.
void PeerActor::on_query_result(td::uint64 query_id, td::Result<td::BufferSlice> r_answer) {
  if (r_answer.is_ok()) {
    // Any successful answer doubles as a liveness signal and download stats.
    on_pong();
    state_.lock()->download.add(r_answer.ok().size(), td::Timestamp::now());
  }
  if (ping_query_id_ && ping_query_id_.value() == query_id) {
    on_ping_result(std::move(r_answer));
  } else if (update_query_id_ && update_query_id_.value() == query_id) {
    on_update_result(std::move(r_answer));
  } else if (update_state_query_.query_id && update_state_query_.query_id.value() == query_id) {
    on_update_state_result(std::move(r_answer));
  } else {
    // Linear scan over in-flight piece queries.
    for (auto &query_it : node_get_piece_) {
      if (query_it.second.query_id && query_it.second.query_id.value() == query_id) {
        on_get_piece_result(query_it.first, std::move(r_answer));
        query_it.second.query_id = {};
      }
    }
  }
  schedule_loop();
}
// Actor startup: register with the transport, pick a fresh session id and
// publish our actor id into the shared state so the node can wake us.
void PeerActor::start_up() {
  callback_->register_self(actor_id(this));
  node_session_id_ = td::Random::secure_uint64();
  auto state = state_.lock();
  state->peer = actor_id(this);
  notify_node();
  schedule_loop();
}
// Main state pump: each helper inspects its piece of state and issues
// queries / timers as needed. loop_notify_node() must stay last so any
// changes made above are flushed to the node in one wakeup.
void PeerActor::loop() {
  loop_ping();
  loop_pong();
  loop_update_init();
  loop_update_state();
  loop_update_pieces();
  loop_node_get_piece();
  loop_peer_get_piece();
  loop_notify_node();
}
// Declares the peer offline if no pong-like answer arrived within the
// 4-second window set by on_pong(); otherwise re-arms the alarm.
void PeerActor::loop_pong() {
  if (wait_pong_till_ && wait_pong_till_.is_in_past()) {
    wait_pong_till_ = {};
    LOG(INFO) << "Disconnected";
    state_.lock()->peer_online_ = false;
    notify_node();
  }
  alarm_timestamp().relax(wait_pong_till_);
}
// Sends a ping every 2 seconds, at most one in flight at a time.
void PeerActor::loop_ping() {
  if (ping_query_id_) {
    // Previous ping still pending.
    return;
  }
  if (!next_ping_at_.is_in_past()) {
    alarm_timestamp().relax(next_ping_at_);
    return;
  }
  next_ping_at_ = td::Timestamp::in(2);
  alarm_timestamp().relax(next_ping_at_);
  ping_query_id_ = create_and_send_query<ton::ton_api::storage_ping>(node_session_id_);
}
// Wraps an update into an addUpdate query stamped with the peer's session id
// and the next node-side seqno. Callers must ensure peer_session_id_ is set.
td::BufferSlice PeerActor::create_update_query(ton::tl_object_ptr<ton::ton_api::storage_Update> update) {
  auto session_id = static_cast<td::int64>(peer_session_id_.value());
  auto seqno = static_cast<td::int32>(++node_seqno_);
  return ton::create_serialize_tl_object<ton::ton_api::storage_addUpdate>(session_id, seqno, std::move(update));
}
// Once we know the peer's session id, sends one updateInit carrying our full
// have-pieces bitset and current state. Runs until it succeeds
// (peer_is_inited_), with at most one query in flight.
void PeerActor::loop_update_init() {
  if (!peer_session_id_) {
    return;
  }
  if (update_query_id_) {
    return;
  }
  if (peer_is_inited_) {
    return;
  }
  // Fold any queued incremental updates into the bitset; the full bitset
  // supersedes them, so the pending list is dropped.
  update_have_pieces();
  have_pieces_list_.clear();
  auto state = state_.lock();
  auto query = create_update_query(ton::create_tl_object<ton::ton_api::storage_updateInit>(
      td::BufferSlice(have_pieces_.as_slice()), to_ton_api(state->node_state_)));
  // take care about update_state_query initial state
  update_state_query_.state = state->node_state_;
  update_state_query_.query_id = 0;
  update_query_id_ = send_query(std::move(query));
}
// Pushes our node state (want_download / will_upload) to the peer whenever
// it diverges from the last state we sent; one query in flight at a time.
void PeerActor::loop_update_state() {
  if (!peer_is_inited_) {
    return;
  }
  auto state = state_.lock();
  if (!(update_state_query_.state == state->node_state_)) {
    // State changed since the last (possibly in-flight) send: resend.
    update_state_query_.state = state->node_state_;
    update_state_query_.query_id = {};
  }
  if (update_state_query_.query_id) {
    return;
  }
  auto query = create_update_query(
      ton::create_tl_object<ton::ton_api::storage_updateState>(to_ton_api(update_state_query_.state)));
  update_state_query_.query_id = send_query(std::move(query));
}
// Drains node_ready_parts_ from the shared state into both the pending
// incremental list and the cumulative bitset.
void PeerActor::update_have_pieces() {
  auto state = state_.lock();
  const auto &fresh = state->node_ready_parts_;
  have_pieces_list_.insert(have_pieces_list_.end(), fresh.begin(), fresh.end());
  for (auto piece_id : fresh) {
    have_pieces_.set_one(piece_id);
  }
  state->node_ready_parts_.clear();
}
// Sends an incremental updateHavePieces with any newly ready pieces, once
// the init update succeeded and no other addUpdate query is in flight.
void PeerActor::loop_update_pieces() {
  if (update_query_id_) {
    return;
  }
  if (!peer_is_inited_) {
    return;
  }
  update_have_pieces();
  if (!have_pieces_list_.empty()) {
    auto query = create_update_query(ton::create_tl_object<ton::ton_api::storage_updateHavePieces>(
        td::transform(have_pieces_list_, [](auto x) { return static_cast<td::int32>(x); })));
    update_query_id_ = send_query(std::move(query));
  }
}
// Synchronizes our outgoing getPiece queries with the node's wish list:
// drops queries the node no longer wants (or that already have a result),
// adds new ones, and sends any not yet in flight.
void PeerActor::loop_node_get_piece() {
  auto state = state_.lock();
  // Drop stale local queries.
  for (auto it = node_get_piece_.begin(); it != node_get_piece_.end();) {
    auto other_it = state->node_queries_.find(it->first);
    if (other_it == state->node_queries_.end() || other_it->second) {
      it = node_get_piece_.erase(it);
    } else {
      it++;
    }
  }
  // Register still-unanswered node queries.
  for (auto &query_it : state->node_queries_) {
    if (query_it.second) {
      continue;
    }
    node_get_piece_.emplace(query_it.first, NodePieceQuery{});
  }
  // Send every query that has no id yet.
  for (auto &query_it : node_get_piece_) {
    if (query_it.second.query_id) {
      continue;
    }
    query_it.second.query_id =
        create_and_send_query<ton::ton_api::storage_getPiece>(static_cast<td::int32>(query_it.first));
  }
}
// Bridges incoming getPiece requests (peer_get_piece_) with the node's
// answer channel (state->peer_queries_): delivers finished answers to the
// peer, garbage-collects abandoned queries, and registers new ones.
void PeerActor::loop_peer_get_piece() {
  auto state = state_.lock();
  // process answers
  for (auto &it : state->peer_queries_) {
    if (!it.second) {
      continue;
    }
    auto promise_it = peer_get_piece_.find(it.first);
    if (promise_it == peer_get_piece_.end()) {
      continue;
    }
    promise_it->second.promise.set_result(it.second.unwrap().move_map([](PeerState::Part part) {
      return ton::create_serialize_tl_object<ton::ton_api::storage_piece>(std::move(part.proof), std::move(part.data));
    }));
    peer_get_piece_.erase(promise_it);
  }
  // erase unneeded queries
  for (auto it = state->peer_queries_.begin(); it != state->peer_queries_.end();) {
    if (peer_get_piece_.count(it->first) == 0) {
      it = state->peer_queries_.erase(it);
      notify_node();
    } else {
      it++;
    }
  }
  // create queries
  for (auto &query_it : peer_get_piece_) {
    auto res = state->peer_queries_.emplace(query_it.first, td::optional<td::Result<PeerState::Part>>());
    if (res.second) {
      notify_node();
    }
  }
}
// Coalesces all notify_node() requests made during this loop() pass into a
// single wakeup of the NodeActor.
void PeerActor::loop_notify_node() {
  if (!need_notify_node_) {
    return;
  }
  need_notify_node_ = false;
  auto guard = state_.lock();
  guard->notify_node();
}
// Handles an incoming ping. A new session id means the peer restarted, so
// all handshake state (init/state queries) must be re-established.
void PeerActor::execute_ping(td::uint64 session_id, td::Promise<td::BufferSlice> promise) {
  if (!peer_session_id_ || peer_session_id_.value() != session_id) {
    peer_session_id_ = session_id;
    peer_is_inited_ = false;
    update_query_id_ = {};
    update_state_query_.query_id = {};
  }
  promise.set_value(ton::create_serialize_tl_object<ton::ton_api::storage_pong>());
}
// Handles an incoming addUpdate (init / state / have-pieces). Updates tagged
// with a stale session id are rejected; state updates are ordered by seqno.
void PeerActor::execute_add_update(ton::ton_api::storage_addUpdate &add_update, td::Promise<td::BufferSlice> promise) {
  auto session_id = static_cast<td::uint64>(add_update.session_id_);
  if (session_id != node_session_id_) {
    promise.set_error(td::Status::Error(404, "INVALID_SESSION"));
    return;
  }
  // Acknowledge before applying; the update itself cannot fail below.
  promise.set_value(ton::create_serialize_tl_object<ton::ton_api::storage_ok>());
  auto state = state_.lock();
  // Records a piece the peer now has, deduplicated via peer_have_pieces_.
  auto add_piece = [&](PartId id) {
    if (!peer_have_pieces_.get(id)) {
      peer_have_pieces_.set_one(id);
      state->peer_ready_parts_.push_back(id);
      notify_node();
    }
  };
  auto seqno = static_cast<td::uint32>(add_update.seqno_);
  // Applies a peer state carried by this update unless it is out of order
  // (older seqno) or identical to what we already have.
  auto update_peer_state = [&](PeerState::State peer_state) {
    if (peer_seqno_ >= seqno) {
      return;
    }
    if (state->peer_state_ && state->peer_state_.value() == peer_state) {
      return;
    }
    peer_seqno_ = seqno;
    state->peer_state_ = peer_state;
    notify_node();
  };
  downcast_call(*add_update.update_,
                td::overloaded(
                    [&](ton::ton_api::storage_updateHavePieces &have_pieces) {
                      for (auto id : have_pieces.piece_id_) {
                        add_piece(id);
                      }
                    },
                    [&](ton::ton_api::storage_updateState &state) { update_peer_state(from_ton_api(*state.state_)); },
                    [&](ton::ton_api::storage_updateInit &init) {
                      update_peer_state(from_ton_api(*init.state_));
                      // Full bitset snapshot: import every set bit.
                      td::Bitset new_bitset;
                      new_bitset.set_raw(init.have_pieces_.as_slice().str());
                      for (auto size = new_bitset.size(), i = size_t(0); i < size; i++) {
                        if (new_bitset.get(i)) {
                          add_piece(static_cast<PartId>(i));
                        }
                      }
                    }));
}
// Queues an incoming getPiece request; loop_peer_get_piece() forwards it to
// the node and later completes the promise with the piece (or an error).
void PeerActor::execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise<td::BufferSlice> promise) {
  const PartId piece_id = get_piece.piece_id_;
  peer_get_piece_[piece_id] = PeerPieceQuery{std::move(promise)};
}
} // namespace ton

112
storage/PeerActor.h Normal file
View file

@ -0,0 +1,112 @@
#pragma once
#include "Bitset.h"
#include "PeerState.h"
#include "SharedState.h"
#include "td/utils/optional.h"
#include "auto/tl/ton_api.h"
namespace ton {
// Actor that speaks the storage protocol with a single remote peer.
// Communicates with its NodeActor exclusively through a SharedState<PeerState>
// mailbox; the Callback abstracts the network transport.
class PeerActor : public td::actor::Actor {
 public:
  class Callback {
   public:
    virtual ~Callback() {
    }
    // Gives the transport our actor id so answers can be routed back.
    virtual void register_self(td::actor::ActorId<PeerActor> self) = 0;
    // Sends a serialized query; the answer arrives via on_query_result().
    virtual void send_query(td::uint64 query_id, td::BufferSlice query) = 0;
  };
  PeerActor(td::unique_ptr<Callback> callback, td::SharedState<PeerState> state);
  // Incoming query from the remote peer.
  void execute_query(td::BufferSlice query, td::Promise<td::BufferSlice> promise);
  // Answer to one of our outgoing queries.
  void on_query_result(td::uint64 query_id, td::Result<td::BufferSlice> r_answer);
 private:
  td::unique_ptr<Callback> callback_;
  td::SharedState<PeerState> state_;
  bool need_notify_node_{false};
  td::uint64 next_query_id_{0};
  // ping
  td::Timestamp next_ping_at_;
  td::optional<td::uint64> ping_query_id_;
  td::Timestamp wait_pong_till_;
  // startSession
  td::uint64 node_session_id_;
  td::Bitset peer_have_pieces_;
  // update
  td::optional<td::uint64> peer_session_id_;
  td::optional<td::uint64> update_query_id_;
  bool peer_is_inited_{false};
  td::uint32 node_seqno_{0};
  td::Bitset have_pieces_;
  std::vector<PartId> have_pieces_list_;
  td::uint32 peer_seqno_{0};
  // update state
  struct UpdateState {
    td::optional<td::uint64> query_id;
    PeerState::State state;
  };
  UpdateState update_state_query_;
  // getPiece
  struct NodePieceQuery {
    td::optional<td::uint64> query_id;
  };
  std::map<PartId, NodePieceQuery> node_get_piece_;
  struct PeerPieceQuery {
    td::Promise<td::BufferSlice> promise;
  };
  std::map<PartId, PeerPieceQuery> peer_get_piece_;
  void start_up() override;
  void loop() override;
  void loop_notify_node();
  void loop_pong();
  void execute_ping(td::uint64 session_id, td::Promise<td::BufferSlice> promise);
  void on_ping_result(td::Result<td::BufferSlice> r_answer);
  void on_pong();
  void loop_ping();
  void loop_update_init();
  void loop_update_pieces();
  void update_have_pieces();
  void loop_update_state();
  td::BufferSlice create_update_query(ton::tl_object_ptr<ton::ton_api::storage_Update> update);
  void loop_node_get_piece();
  void loop_peer_get_piece();
  void execute_add_update(ton::ton_api::storage_addUpdate &add_update, td::Promise<td::BufferSlice> promise);
  void execute_get_piece(ton::ton_api::storage_getPiece &get_piece, td::Promise<td::BufferSlice> promise);
  void on_update_result(td::Result<td::BufferSlice> r_answer);
  void on_get_piece_result(PartId piece_id, td::Result<td::BufferSlice> r_answer);
  void on_update_state_result(td::Result<td::BufferSlice> r_answer);
  template <class T, class... ArgsT>
  td::uint64 create_and_send_query(ArgsT &&... args);
  td::uint64 send_query(td::BufferSlice query);
  void schedule_loop();
  void notify_node();
};
} // namespace ton

16
storage/PeerState.cpp Normal file
View file

@ -0,0 +1,16 @@
#include "PeerState.h"
namespace ton {
// Wakes the NodeActor (if attached) with a deferred wakeup signal.
void PeerState::notify_node() {
  if (!node.empty()) {
    td::actor::send_signals_later(node, td::actor::ActorSignals::wakeup());
  }
}
// Wakes the PeerActor (if attached) with a deferred wakeup signal.
void PeerState::notify_peer() {
  if (!peer.empty()) {
    td::actor::send_signals_later(peer, td::actor::ActorSignals::wakeup());
  }
}
} // namespace ton

55
storage/PeerState.h Normal file
View file

@ -0,0 +1,55 @@
#pragma once
#include "td/utils/buffer.h"
#include "td/utils/common.h"
#include "td/utils/optional.h"
#include "td/actor/actor.h"
#include <map>
#include "LoadSpeed.h"
namespace ton {
using PeerId = td::uint64;
using PartId = td::uint32;
// Shared mailbox between a NodeActor and one PeerActor. Access is mediated by
// SharedState's exclusive guard; each side mutates its own fields and reads
// the other's, then wakes the counterpart via notify_*().
struct PeerState {
  // Upload/download intentions exchanged between the two sides.
  struct State {
    bool will_upload{false};
    bool want_download{false};
    auto key() const {
      return std::tie(will_upload, want_download);
    }
    bool operator==(const State &other) const {
      return key() == other.key();
    }
  };
  State node_state_;                 // our side's intentions
  td::optional<State> peer_state_;   // last state received from the peer
  bool peer_online_{false};
  // A downloaded piece: raw bytes plus its Merkle proof.
  struct Part {
    td::BufferSlice proof;
    td::BufferSlice data;
  };
  // part id -> empty optional while pending, result once answered
  std::map<PartId, td::optional<td::Result<Part>>> node_queries_;
  std::map<PartId, td::optional<td::Result<Part>>> peer_queries_;
  // Peer -> Node
  // update are added to this vector, so reader will be able to process all changes
  std::vector<PartId> peer_ready_parts_;
  // Node -> Peer
  // writer writes all new parts to this vector. This state will be eventually synchornized with a peer
  std::vector<PartId> node_ready_parts_;
  td::actor::ActorId<> node;
  td::actor::ActorId<> peer;
  LoadSpeed upload;
  LoadSpeed download;
  void notify_node();
  void notify_peer();
};
} // namespace ton

48
storage/SharedState.h Normal file
View file

@ -0,0 +1,48 @@
#pragma once
#include <atomic>
#include "td/utils/MovableValue.h"
#include <memory>
namespace td {
// A value shared between two parties with mutual-exclusion enforced by
// assertion rather than blocking: lock() CHECK-fails if the value is already
// locked, so correctness relies on callers never holding two guards at once.
template <class T>
class SharedState {
 public:
  friend class Guard;
  // RAII exclusive access token. Movable; the moved-from guard releases
  // nothing (MovableValue nulls its pointer).
  class Guard {
   public:
    Guard(Guard &&) = default;
    Guard(SharedState<T> *self) : self(self) {
      // Aborts if somebody else currently holds the lock.
      CHECK(!self->data_->is_locked.exchange(true));
    }
    ~Guard() {
      if (self.get()) {
        CHECK(self.get()->data_->is_locked.exchange(false));
      }
    }
    T *get() {
      return &self.get()->data_->data;
    }
    T *operator->() {
      return get();
    }
   private:
    td::MovableValue<SharedState<T> *> self;
  };
  auto lock() {
    return Guard{this};
  }
  // Unsynchronized access — caller must guarantee exclusivity externally.
  auto unsafe() {
    return &data_->data;
  }
 private:
  struct Data {
    std::atomic<bool> is_locked{};
    T data;
  };
  std::shared_ptr<Data> data_{std::make_shared<Data>()};
};
} // namespace td

425
storage/Torrent.cpp Normal file
View file

@ -0,0 +1,425 @@
#include "Torrent.h"
#include "td/utils/Status.h"
#include "td/utils/crypto.h"
#include "td/utils/port/Stat.h"
#include "td/utils/tl_helpers.h"
namespace ton {
// Builds a Torrent from its meta: sizes the Merkle tree and readiness
// bookkeeping, and either applies a known header or reserves a buffer for
// assembling it from the first pieces.
Torrent::Torrent(TorrentMeta meta)
    : info_(meta.info)
    , merkle_tree_(info_.pieces_count(), info_.root_hash)
    , piece_is_ready_(info_.pieces_count(), false) {
  not_ready_piece_count_ = piece_is_ready_.size();
  // The header occupies the first ceil(header_size / piece_size) pieces.
  header_pieces_count_ = (info_.header_size + info_.piece_size - 1) / info_.piece_size;
  not_ready_pending_piece_count_ = header_pieces_count_;
  if (meta.header) {
    set_header(meta.header.unwrap());
  } else {
    header_str_ = td::BufferSlice(info_.header_size);
  }
  if (meta.root_proof.not_null()) {
    merkle_tree_.add_proof(meta.root_proof);
  }
}
// Opens a torrent from parsed meta. Unless in_memory is set, files are
// rooted at options.root_dir (defaulting to the current directory); with
// options.validate, existing data on disk is scanned immediately.
td::Result<Torrent> Torrent::open(Options options, TorrentMeta meta) {
  Torrent torrent(std::move(meta));
  if (!options.in_memory) {
    torrent.set_root_dir(options.root_dir.empty() ? "." : options.root_dir);
  }
  if (options.validate) {
    torrent.validate();
  }
  return std::move(torrent);
}
// Convenience overload: deserializes the meta first, then opens.
td::Result<Torrent> Torrent::open(Options options, td::Slice meta_str) {
  TRY_RESULT(parsed_meta, TorrentMeta::deserialize(meta_str));
  return open(std::move(options), std::move(parsed_meta));
}
// Read-only access to the torrent's immutable info.
const Torrent::Info &Torrent::get_info() const {
  return info_;
}
// One piece/chunk intersection produced by Torrent::iterate_piece():
// `size` bytes starting at `piece_offset` within the piece and at
// `chunk_offset` within the chunk (file).
struct IterateInfo {
  td::uint64 piece_offset;
  td::uint64 chunk_offset;
  td::uint64 size;
};
// Calls f(chunk_iterator, IterateInfo) for every chunk (file) overlapping the
// given piece, in file order, and verifies the overlaps cover the whole
// piece. Stops early and forwards the error if f fails.
template <class F>
td::Status Torrent::iterate_piece(Info::PieceInfo piece, F &&f) {
  // Binary search for the first chunk ending after the piece starts
  // (chunks_ is sorted by offset).
  auto chunk_it = std::lower_bound(chunks_.begin(), chunks_.end(), piece.offset, [](auto &chunk, auto &piece_offset) {
    return chunk.offset + chunk.size <= piece_offset;
  });
  td::uint64 size = 0;
  for (; chunk_it != chunks_.end(); chunk_it++) {
    if (chunk_it->offset >= piece.offset + piece.size) {
      break;
    }
    if (chunk_it->size == 0) {
      // Empty files occupy no bytes in the piece stream.
      continue;
    }
    auto l = td::max(chunk_it->offset, piece.offset);
    auto r = td::min(chunk_it->offset + chunk_it->size, piece.offset + piece.size);
    CHECK(l < r);
    IterateInfo info;
    info.piece_offset = l - piece.offset;
    info.chunk_offset = l - chunk_it->offset;
    info.size = r - l;
    size += info.size;
    TRY_STATUS(f(chunk_it, info));
  }
  LOG_CHECK(size == piece.size) << size << " vs " << piece.size;
  return td::Status::OK();
}
// True if the piece has been received (or found on disk) and validated.
bool Torrent::is_piece_ready(td::uint64 piece_i) const {
  CHECK(piece_i < info_.pieces_count());
  return piece_is_ready_[piece_i];
}
// Number of files, or empty until the torrent header has been received.
td::optional<size_t> Torrent::get_files_count() const {
  if (!header_) {
    return {};
  }
  return header_.value().files_count;
}
// Name of file #i (chunk 0 is the header, hence the +1 shift).
td::CSlice Torrent::get_file_name(size_t i) const {
  const auto &chunk = chunks_.at(i + 1);
  return chunk.name;
}
// Size in bytes of file #i (chunk 0 is the header, hence the +1 shift).
td::uint64 Torrent::get_file_size(size_t i) const {
  const auto &chunk = chunks_.at(i + 1);
  return chunk.size;
}
// Bytes of file #i already downloaded and validated.
td::uint64 Torrent::get_file_ready_size(size_t i) const {
  const auto &chunk = chunks_.at(i + 1);
  return chunk.ready_size;
}
// Half-open range [begin, end) of piece indices covering file #i.
Torrent::PartsRange Torrent::get_file_parts_range(size_t i) {
  const auto &chunk = chunks_.at(i + 1);
  const auto first_byte = chunk.offset;
  const auto last_byte = first_byte + chunk.size;
  PartsRange range;
  range.begin = first_byte / info_.piece_size;
  range.end = (last_byte + info_.piece_size - 1) / info_.piece_size;
  return range;
}
// Piece range occupied by the torrent header (always the leading pieces).
Torrent::PartsRange Torrent::get_header_parts_range() {
  PartsRange range;
  range.begin = 0;
  range.end = header_pieces_count_;
  return range;
}
// Reads dest.size() bytes of this chunk starting at chunk-local `offset`.
// With a cache, reads go through a larger read-ahead buffer keyed by global
// (torrent-stream) offsets; otherwise it reads straight from the blob.
TD_WARN_UNUSED_RESULT td::Status Torrent::ChunkState::get_piece(td::MutableSlice dest, td::uint64 offset,
                                                                Cache *cache) {
  if (dest.empty()) {
    return td::Status::OK();
  }
  if (cache != nullptr) {
    auto global_offset = offset + this->offset;
    if (cache->offset > global_offset || cache->offset + cache->size < global_offset + dest.size()) {
      // Cache miss: refill from the requested offset, up to the buffer size
      // (but not past the end of this chunk).
      auto load_size = td::min(size - offset, (td::uint64)cache->slice.size());
      cache->size = 0;
      TRY_STATUS(get_piece(cache->slice.as_slice().truncate(load_size), offset));
      cache->offset = global_offset;
      cache->size = load_size;
    }
    dest.copy_from(cache->slice.as_slice().substr(global_offset - cache->offset, dest.size()));
    CHECK(cache->slice.size() >= dest.size());
    return td::Status::OK();
  }
  TRY_RESULT(size, data.view_copy(dest, offset));
  if (size != dest.size()) {
    return td::Status::Error("Failed to read the whole chunk");
  }
  return td::Status::OK();
}
// Human-readable per-file progress: "<name>\t<percent>% <ready>/<total>".
// Returns a placeholder until the header (and thus the file list) is known.
std::string Torrent::get_stats_str() const {
  td::StringBuilder sb;
  auto o_n = get_files_count();
  if (!o_n) {
    return "NO HEADER YET\n";
  }
  for (size_t i = 0, n = o_n.unwrap(); i < n; i++) {
    auto size = get_file_size(i);
    auto ready_size = get_file_ready_size(i);
    // Guard against division by zero for empty files (treated as complete).
    auto percent = size == 0 ? 100 : 100 * ready_size / size;
    // StringBuilder writes characters verbatim (no printf escaping), so a
    // single '%' is correct here; "%%" would be rendered literally.
    sb << get_file_name(i) << "\t" << percent << "% " << td::format::as_size(ready_size) << "/"
       << td::format::as_size(size) << "\n";
  }
  return sb.as_cslice().str();
}
// Re-scans local data and recomputes which pieces are available: resets all
// readiness bookkeeping, opens every chunk file that exists, hashes each
// piece that is fully present and feeds the hashes to the Merkle tree, which
// reports (via bitmask) which pieces actually match.
void Torrent::validate() {
  CHECK(header_);
  std::fill(piece_is_ready_.begin(), piece_is_ready_.end(), false);
  not_ready_piece_count_ = info_.pieces_count();
  for (auto &chunk : chunks_) {
    chunk.ready_size = 0;
    if (root_dir_) {
      if (td::stat(get_chunk_path(chunk.name)).is_error()) {
        // File absent on disk — its pieces will simply fail below.
        continue;
      }
    }
    init_chunk_data(chunk);
  }
  std::vector<td::UInt256> hashes;
  std::vector<MerkleTree::Chunk> chunks;
  // Verifies the accumulated hashes against the tree and credits matching
  // pieces to per-file ready_size and global counters.
  auto flush = [&] {
    td::Bitset bitmask;
    merkle_tree_.add_chunks(chunks, bitmask);
    for (size_t i = 0; i < chunks.size(); i++) {
      if (!bitmask.get(i)) {
        continue;
      }
      auto piece_i = chunks[i].index;
      auto piece = info_.get_piece_info(piece_i);
      iterate_piece(piece, [&](auto it, auto info) {
        it->ready_size += info.size;
        return td::Status::OK();
      });
      piece_is_ready_[piece_i] = true;
      ready_parts_count_++;
      CHECK(not_ready_piece_count_);
      not_ready_piece_count_--;
    }
    hashes.clear();
    chunks.clear();
  };
  td::BufferSlice buf(info_.piece_size);
  ChunkState::Cache cache;
  cache.slice = td::BufferSlice(td::max(8u << 20, info_.piece_size));
  for (size_t piece_i = 0; piece_i < info_.pieces_count(); piece_i++) {
    auto piece = info_.get_piece_info(piece_i);
    td::Sha256State sha256;
    sha256.init();
    bool skipped = false;
    auto is_ok = iterate_piece(piece, [&](auto it, auto info) {
      if (!it->data) {
        skipped = true;
        return td::Status::Error("No such file");
      }
      if (!it->has_piece(info.chunk_offset, info.size)) {
        return td::Status::Error("Don't have piece");
      }
      auto dest = buf.as_slice().truncate(info.size);
      TRY_STATUS(it->get_piece(dest, info.chunk_offset, &cache));
      sha256.feed(dest);
      return td::Status::OK();
    });
    if (is_ok.is_error()) {
      // Missing files are expected during a partial download and are not
      // logged; other failures are. (The previous unconditional second log
      // duplicated this message and defeated the suppression.)
      LOG_IF(ERROR, !skipped) << "Failed: " << is_ok;
      continue;
    }
    MerkleTree::Chunk chunk;
    chunk.index = piece_i;
    sha256.extract(chunk.hash.as_slice());
    chunks.push_back(chunk);
  }
  flush();
}
// Returns the raw bytes of a ready piece. While the header is incomplete,
// ready pieces live in pending_pieces_; afterwards they are reassembled from
// the chunk files.
td::Result<std::string> Torrent::get_piece_data(td::uint64 piece_i) {
  CHECK(piece_i < info_.pieces_count());
  if (!piece_is_ready_[piece_i]) {
    return td::Status::Error("Piece is not ready");
  }
  auto it = pending_pieces_.find(piece_i);
  if (it != pending_pieces_.end()) {
    return it->second;
  }
  auto piece = info_.get_piece_info(piece_i);
  std::string res(piece.size, '\0');
  // Cannot fail here: the piece is marked ready, so all chunks are readable.
  iterate_piece(piece, [&](auto it, auto info) {
    return it->get_piece(td::MutableSlice(res).substr(info.piece_offset, info.size), info.chunk_offset);
  });
  return res;
}
// Merkle proof for a single piece (a degenerate one-piece range).
td::Result<td::Ref<vm::Cell>> Torrent::get_piece_proof(td::uint64 piece_i) {
  CHECK(piece_i < info_.pieces_count());
  return merkle_tree_.gen_proof(piece_i, piece_i);
}
// Accepts a downloaded piece: merges its Merkle proof, verifies the piece
// hash against the tree, then stores it either into the header-assembly path
// (before the header is known) or directly into the chunk files.
td::Status Torrent::add_piece(td::uint64 piece_i, td::Slice data, td::Ref<vm::Cell> proof) {
  TRY_STATUS(merkle_tree_.add_proof(proof));
  CHECK(piece_i < info_.pieces_count());
  if (piece_is_ready_[piece_i]) {
    // Duplicate delivery is fine.
    return td::Status::OK();
  }
  piece_is_ready_[piece_i] = true;
  ready_parts_count_++;
  ton::MerkleTree::Chunk chunk;
  chunk.index = piece_i;
  td::sha256(data, chunk.hash.as_slice());
  TRY_STATUS(merkle_tree_.try_add_chunks({chunk}));
  // chunks_ is populated only once the header is known.
  if (chunks_.empty()) {
    return add_header_piece(piece_i, data);
  }
  return add_validated_piece(piece_i, data);
}
// Buffers a piece that arrived before the header is complete. Header pieces
// are additionally copied into header_str_; once the last one lands, the
// header is parsed and all buffered pieces are replayed into the real files.
td::Status Torrent::add_header_piece(td::uint64 piece_i, td::Slice data) {
  pending_pieces_[piece_i] = data.str();
  if (piece_i < header_pieces_count_) {
    auto piece = info_.get_piece_info(piece_i);
    auto dest = header_str_.as_slice().substr(piece.offset);
    // The last header piece may extend past header_size; clip to the buffer.
    data.truncate(dest.size());
    dest.copy_from(data);
    not_ready_pending_piece_count_--;
    if (not_ready_pending_piece_count_ == 0) {
      TorrentHeader header;
      TRY_STATUS(td::unserialize(header, header_str_.as_slice()));  // TODO: it is a fatal error
      set_header(header);
      // Replay every buffered piece now that chunk files exist.
      for (auto &it : pending_pieces_) {
        TRY_STATUS(add_validated_piece(it.first, it.second));
      }
      pending_pieces_.clear();
    }
  } else {
    LOG(ERROR) << "PENDING";
  }
  return td::Status::OK();
}
// Absolute path of a file: <root_dir>/<torrent dir_name>/<file name>.
// Requires root_dir_ and header_ to be set.
std::string Torrent::get_chunk_path(td::Slice name) {
  return PSTRING() << root_dir_.value() << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH << name;
}
// Lazily attaches backing storage to a chunk: a file on disk when rooted,
// or an in-memory buffer otherwise. Idempotent.
td::Status Torrent::init_chunk_data(ChunkState &chunk) {
  if (chunk.data) {
    return td::Status::OK();
  }
  if (!root_dir_) {
    chunk.data = td::BufferSliceBlobView::create(td::BufferSlice(chunk.size));
    return td::Status::OK();
  }
  TRY_RESULT(blob, td::FileNoCacheBlobView::create(get_chunk_path(chunk.name), chunk.size, true));
  chunk.data = std::move(blob);
  return td::Status::OK();
}
// Writes an already hash-verified piece into the overlapping chunk files,
// creating backing storage on demand, and updates readiness counters.
td::Status Torrent::add_validated_piece(td::uint64 piece_i, td::Slice data) {
  CHECK(!chunks_.empty());
  auto piece = info_.get_piece_info(piece_i);
  TRY_STATUS(iterate_piece(piece, [&](auto it, auto info) {
    TRY_STATUS(init_chunk_data(*it));
    return it->add_piece(data.substr(info.piece_offset, info.size), info.chunk_offset);
  }));
  piece_is_ready_[piece_i] = true;
  not_ready_piece_count_--;
  return td::Status::OK();
}
// Complete once every piece has been received and validated.
bool Torrent::is_completed() const {
  return not_ready_piece_count_ == 0;
}
// Returns the full content of the named file. Intended for in-memory use
// and tests; fails if the name is unknown.
td::Result<td::BufferSlice> Torrent::read_file(td::Slice name) {
  for (auto &chunk : chunks_) {
    if (chunk.name != name) {
      continue;
    }
    td::BufferSlice content(chunk.size);
    TRY_STATUS(chunk.get_piece(content.as_slice(), 0));
    return std::move(content);
  }
  return td::Status::Error("Unknown name");
}
// Out-of-line defaulted constructor for the options struct declared in Torrent.h.
Torrent::GetMetaOptions::GetMetaOptions() = default;
// Serialized form of get_meta() with the same options.
std::string Torrent::get_meta_str(const GetMetaOptions &options) const {
  auto meta = get_meta(options);
  return meta.serialize();
}
// Builds a TorrentMeta snapshot; header and root proof are included only
// when the corresponding options are set.
TorrentMeta Torrent::get_meta(const GetMetaOptions &options) const {
  TorrentMeta meta;
  meta.info = info_;
  meta.info.init_cell();
  if (options.with_header) {
    meta.header = header_;
  }
  if (options.with_proof) {
    meta.root_proof = merkle_tree_.get_root(options.proof_depth_limit);
  }
  return meta;
}
// Private constructor used by Torrent::Creator for a fully materialized
// torrent: every piece is marked ready from the start.
Torrent::Torrent(Info info, td::optional<TorrentHeader> header, ton::MerkleTree tree, std::vector<ChunkState> chunks)
    : info_(info)
    , header_(std::move(header))
    , merkle_tree_(std::move(tree))
    , piece_is_ready_(info_.pieces_count(), true)
    , ready_parts_count_{info_.pieces_count()}
    , chunks_(std::move(chunks)) {
}
// Applies a parsed header: builds the chunk table, where chunk 0 is the
// serialized header itself (backed by an in-memory blob) followed by one
// chunk per file at its offset in the torrent's byte stream.
void Torrent::set_header(const TorrentHeader &header) {
  header_ = header;
  auto add_chunk = [&](td::Slice name, td::uint64 offset, td::uint64 size) {
    ChunkState chunk;
    chunk.name = name.str();
    chunk.ready_size = 0;
    chunk.size = size;
    chunk.offset = offset;
    chunks_.push_back(std::move(chunk));
  };
  // Chunk 0: the header blob, always fully available once set.
  add_chunk("", 0, header.serialization_size());
  chunks_.back().data = td::BufferSliceBlobView::create(header.serialize());
  for (size_t i = 0; i < header.files_count; i++) {
    auto l = header.get_data_begin(i);
    auto r = header.get_data_end(i);
    add_chunk(header.get_name(i), l, r - l);
  }
}
// Number of pieces currently available locally.
size_t Torrent::get_ready_parts_count() const {
  return ready_parts_count_;
}
// Indices (into chunks_) of every chunk overlapping the given piece.
// The iteration itself cannot fail (the callback always returns OK), so the
// status is discarded explicitly instead of being parked in an unused local.
std::vector<size_t> Torrent::chunks_by_piece(td::uint64 piece_id) {
  std::vector<size_t> res;
  auto piece = info_.get_piece_info(piece_id);
  iterate_piece(piece, [&](auto it, auto info) {
    res.push_back(it - chunks_.begin());
    return td::Status::OK();
  }).ignore();
  return res;
}
} // namespace ton

150
storage/Torrent.h Normal file
View file

@ -0,0 +1,150 @@
#pragma once
#include "MerkleTree.h"
#include "TorrentMeta.h"
#include "td/utils/buffer.h"
#include "td/db/utils/BlobView.h"
#include <map>
namespace ton {
// In-memory/on-disk representation of a single torrent: immutable info,
// an optional header, the merkle tree of piece hashes, and per-chunk
// (header + one per file) download state.
class Torrent {
 public:
  class Creator;
  friend class Creator;
  using Info = TorrentInfo;

  struct Options {
    std::string root_dir;   // directory where file chunks live on disk
    bool in_memory{false};  // keep all data in memory instead of files
    bool validate{false};   // re-validate existing data on open
  };

  // creation
  static td::Result<Torrent> open(Options options, TorrentMeta meta);
  static td::Result<Torrent> open(Options options, td::Slice meta_str);
  void validate();

  std::string get_stats_str() const;

  const Info &get_info() const;

  // get piece and proof
  td::Result<std::string> get_piece_data(td::uint64 piece_i);
  td::Result<td::Ref<vm::Cell>> get_piece_proof(td::uint64 piece_i);

  // add piece (with an optional proof)
  td::Status add_piece(td::uint64 piece_i, td::Slice data, td::Ref<vm::Cell> proof);
  //TODO: add multiple chunks? Merkle tree supports much more general interface

  bool is_completed() const;

  // Checks that file is ready and returns its content.
  // Intended mostly for in-memory usage and for tests
  td::Result<td::BufferSlice> read_file(td::Slice name);

  // Controls which optional parts end up in get_meta()/get_meta_str().
  struct GetMetaOptions {
    GetMetaOptions();
    size_t proof_depth_limit{std::numeric_limits<size_t>::max()};
    bool with_header{true};
    bool with_proof{true};

    GetMetaOptions &without_header() {
      with_header = false;
      return *this;
    }
    GetMetaOptions &without_proof() {
      with_proof = false;
      return *this;
    }
    GetMetaOptions &with_proof_depth_limit(size_t limit) {
      proof_depth_limit = limit;
      return *this;
    }
  };
  std::string get_meta_str(const GetMetaOptions &options = {}) const;
  TorrentMeta get_meta(const GetMetaOptions &options = {}) const;

  // Some api for inspection of a current state
  bool is_piece_ready(td::uint64 piece_i) const;
  // Empty until the header chunk has been downloaded and parsed.
  td::optional<size_t> get_files_count() const;
  td::CSlice get_file_name(size_t i) const;
  td::uint64 get_file_size(size_t i) const;
  td::uint64 get_file_ready_size(size_t i) const;

  // Half-open [begin, end) range of piece indices.
  struct PartsRange {
    td::uint64 begin{0};
    td::uint64 end{0};
  };
  PartsRange get_file_parts_range(size_t i);
  PartsRange get_header_parts_range();

  size_t get_ready_parts_count() const;

  std::vector<size_t> chunks_by_piece(td::uint64 piece_id);

 private:
  Info info_;
  td::optional<std::string> root_dir_;

  // While header is not completely available all pieces are stored in memory
  td::BufferSlice header_str_;
  td::optional<TorrentHeader> header_;
  size_t not_ready_pending_piece_count_{0};
  size_t header_pieces_count_{0};
  std::map<td::uint64, td::string> pending_pieces_;

  ton::MerkleTree merkle_tree_;

  std::vector<bool> piece_is_ready_;
  size_t not_ready_piece_count_{0};
  size_t ready_parts_count_{0};

  // Download state of one contiguous region of the torrent stream
  // (the serialized header, or a single file).
  struct ChunkState {
    std::string name;
    td::uint64 offset{0};      // offset inside the whole torrent stream
    td::uint64 size{0};
    td::uint64 ready_size{0};  // bytes written so far
    td::BlobView data;

    // Cached window used by get_piece() to avoid repeated reads.
    struct Cache {
      td::uint64 offset{0};
      td::uint64 size{0};
      td::BufferSlice slice;
    };

    bool is_ready() const {
      return ready_size == size;
    }

    TD_WARN_UNUSED_RESULT td::Status add_piece(td::Slice piece, td::uint64 offset) {
      TRY_RESULT(written, data.write(piece, offset));
      CHECK(written == piece.size());
      ready_size += written;
      return td::Status::OK();
    }
    bool has_piece(td::uint64 offset, td::uint64 size) {
      return data.size() >= offset + size;
    }
    TD_WARN_UNUSED_RESULT td::Status get_piece(td::MutableSlice dest, td::uint64 offset, Cache *cache = nullptr);
  };
  std::vector<ChunkState> chunks_;

  explicit Torrent(Info info, td::optional<TorrentHeader> header, ton::MerkleTree tree, std::vector<ChunkState> chunk);
  explicit Torrent(TorrentMeta meta);
  void set_root_dir(std::string root_dir) {
    root_dir_ = std::move(root_dir);
  }

  std::string get_chunk_path(td::Slice name);
  td::Status init_chunk_data(ChunkState &chunk);
  // Invokes f(chunk_iterator, piece_sub_range) for every chunk overlapping the piece.
  template <class F>
  td::Status iterate_piece(Info::PieceInfo piece, F &&f);

  td::Status add_header_piece(td::uint64 piece_i, td::Slice data);
  td::Status add_validated_piece(td::uint64 piece_i, td::Slice data);
  void set_header(const TorrentHeader &header);
};
} // namespace ton

173
storage/TorrentCreator.cpp Normal file
View file

@ -0,0 +1,173 @@
#include "TorrentCreator.h"
#include "td/db/utils/CyclicBuffer.h"
#include "td/utils/crypto.h"
#include "td/utils/PathView.h"
#include "td/utils/port/path.h"
#include "td/utils/tl_helpers.h"
namespace ton {
// Builds a torrent from a filesystem path: a single file becomes a one-file
// torrent; a directory is walked recursively and every regular file is added
// with its path relative to the directory root.
td::Result<Torrent> Torrent::Creator::create_from_path(Options options, td::CSlice raw_path) {
  TRY_RESULT(path, td::realpath(raw_path));
  TRY_RESULT(stat, td::stat(path));
  if (!stat.is_dir_) {
    // Plain file: the torrent contains just this file under its base name.
    Torrent::Creator creator(options);
    TRY_STATUS(creator.add_file(td::PathView(path).file_name(), path));
    return creator.finalize();
  }
  if (!path.empty() && path.back() != TD_DIR_SLASH) {
    path += TD_DIR_SLASH;
  }
  if (!options.dir_name) {
    options.dir_name = td::PathView::dir_and_file(path).str();
  }
  Torrent::Creator creator(options);
  td::Status add_status;
  auto walk_status = td::WalkPath::run(path, [&](td::CSlice name, td::WalkPath::Type type) {
    if (type != td::WalkPath::Type::NotDir) {
      return td::WalkPath::Action::Continue;
    }
    add_status = creator.add_file(td::PathView::relative(name, path), name);
    return add_status.is_error() ? td::WalkPath::Action::Abort : td::WalkPath::Action::Continue;
  });
  TRY_STATUS(std::move(add_status));
  TRY_STATUS(std::move(walk_status));
  return creator.finalize();
}
// Builds a torrent entirely from in-memory blobs (name + data pairs).
td::Result<Torrent> Torrent::Creator::create_from_blobs(Options options, td::Span<Blob> blobs) {
  Torrent::Creator creator(options);
  for (size_t i = 0; i < blobs.size(); i++) {
    TRY_STATUS(creator.add_blob(blobs[i].name, blobs[i].data));
  }
  return creator.finalize();
}
// Copies the slice into an owned buffer and registers it as a blob.
td::Status Torrent::Creator::add_blob(td::Slice name, td::Slice blob) {
  auto view = td::BufferSliceBlobView::create(td::BufferSlice(blob));
  return add_blob(name, std::move(view));
}
// Registers a named blob; content is consumed later in finalize().
td::Status Torrent::Creator::add_blob(td::Slice name, td::BlobView blob) {
  File entry;
  entry.name = name.str();
  entry.data = std::move(blob);
  files_.push_back(std::move(entry));
  return td::Status::OK();
}
// Registers a file from disk under the given torrent-internal name.
// The content is read lazily (uncached) when finalize() streams it.
TD_WARN_UNUSED_RESULT td::Status Torrent::Creator::add_file(td::Slice name, td::CSlice path) {
  LOG(INFO) << "Add file " << name << " " << path;
  TRY_RESULT(data, td::FileNoCacheBlobView::create(path));
  return add_blob(name, std::move(data));
}
// Produces the final Torrent: builds the header, lays out header + all files
// as one contiguous stream, hashes it piece by piece through a cyclic buffer
// and assembles the merkle tree over the piece hashes.
// Fixes: removed the unused local `std::string buffer;` and a redundant second
// assignment to info.header_size (the value is CHECKed identical just above).
td::Result<Torrent> Torrent::Creator::finalize() {
  TorrentHeader header;
  TRY_RESULT(files_count, td::narrow_cast_safe<td::uint32>(files_.size()));
  header.files_count = files_count;
  header.data_index.resize(files_count);
  header.name_index.resize(files_count);
  td::uint64 data_offset = 0;
  for (size_t i = 0; i < files_.size(); i++) {
    header.names += files_[i].name;
    header.name_index[i] = header.names.size();  // cumulative end offsets
    data_offset += files_[i].data.size();
    header.data_index[i] = data_offset;  // cumulative end offsets
  }
  header.tot_names_size = header.names.size();
  if (options_.dir_name) {
    header.dir_name = options_.dir_name.value();
  }

  // Now we should stream all data to calculate sha256 of all pieces
  td::CyclicBuffer::Options cb_options;
  // Buffer chunk: at least 16 pieces and roughly 1 MiB, rounded to piece size.
  cb_options.chunk_size = td::max(options_.piece_size * 16, (1 << 20) / options_.piece_size * options_.piece_size);
  cb_options.count = 2;
  auto reader_writer = td::CyclicBuffer::create(cb_options);
  auto reader = std::move(reader_writer.first);
  auto writer = std::move(reader_writer.second);

  auto header_size = header.serialization_size();
  auto file_size = header_size + data_offset;
  auto chunks_count = (file_size + options_.piece_size - 1) / options_.piece_size;
  ton::MerkleTree tree;
  tree.init_begin(chunks_count);
  std::vector<Torrent::ChunkState> chunks;
  size_t chunk_i = 0;
  // Hashes every complete piece currently buffered; with force == true also
  // the trailing partial piece (called once at the very end of the stream).
  auto flush_reader = [&](bool force) {
    while (true) {
      auto slice = reader.prepare_read();
      slice.truncate(options_.piece_size);
      if (slice.empty() || (slice.size() != options_.piece_size && !force)) {
        break;
      }
      td::UInt256 hash;
      sha256(slice, hash.as_slice());
      CHECK(chunk_i < chunks_count);
      tree.init_add_chunk(chunk_i, hash.as_slice());
      chunk_i++;
      reader.confirm_read(slice.size());
    }
  };
  td::uint64 offset = 0;
  // Streams one blob through the cyclic buffer and records its ChunkState.
  auto add_blob = [&](auto &&data, td::Slice name) {
    td::uint64 data_offset = 0;
    while (data_offset < data.size()) {
      auto dest = writer.prepare_write();
      CHECK(dest.size() != 0);
      dest.truncate(data.size() - data_offset);
      TRY_RESULT(got_size, data.view_copy(dest, data_offset));
      CHECK(got_size != 0);
      data_offset += got_size;
      writer.confirm_write(got_size);
      flush_reader(false);
    }
    Torrent::ChunkState chunk;
    chunk.name = name.str();
    chunk.offset = offset;
    chunk.size = data.size();
    chunk.ready_size = chunk.size;
    chunk.data = std::move(data);
    offset += chunk.size;
    chunks.push_back(std::move(chunk));
    return td::Status::OK();
  };

  Torrent::Info info;
  auto header_str = td::serialize(header);
  CHECK(header_size == header_str.size());
  info.header_size = header_str.size();
  td::sha256(header_str, info.header_hash.as_slice());
  // The serialized header is chunk 0 (empty name); the files follow in order.
  add_blob(td::BufferSliceBlobView::create(td::BufferSlice(header_str)), "").ensure();
  for (auto &file : files_) {
    add_blob(std::move(file.data), file.name).ensure();
  }
  flush_reader(true);
  tree.init_finish();
  CHECK(chunk_i == chunks_count);
  CHECK(offset == file_size);

  info.piece_size = options_.piece_size;
  info.description = options_.description;
  info.file_size = file_size;
  info.depth = tree.get_depth();
  info.root_hash = tree.get_root_hash();
  info.init_cell();
  Torrent torrent(info, std::move(header), std::move(tree), std::move(chunks));
  return std::move(torrent);
}
} // namespace ton

48
storage/TorrentCreator.h Normal file
View file

@ -0,0 +1,48 @@
#pragma once
#include "Torrent.h"
#include "td/utils/optional.h"
#include "td/utils/SharedSlice.h"
#include "td/db/utils/BlobView.h"
namespace ton {
// Builder for Torrent objects: collect files/blobs via add_*(), then call
// finalize() to compute the header, piece hashes and merkle tree.
class Torrent::Creator {
 public:
  struct Options {
    td::uint32 piece_size{128 * 768};
    // override default dir_name
    // shouldn't be used in a usual workflow
    td::optional<std::string> dir_name;
    std::string description;
  };

  // If path is a file create a torrent with one file in it.
  // If path is a directory, create a torrent with whole directory.
  static td::Result<Torrent> create_from_path(Options options, td::CSlice path);

  // Non-owning name/data pair used by create_from_blobs.
  struct Blob {
    td::Slice name;
    td::Slice data;
  };
  static td::Result<Torrent> create_from_blobs(Options options, td::Span<Blob> blobs);

  // This api is mostly for internal usage. One should prefer static methods
  explicit Creator(Options options) : options_(options) {
  }
  TD_WARN_UNUSED_RESULT td::Status add_blob(td::Slice name, td::Slice blob);
  TD_WARN_UNUSED_RESULT td::Status add_blob(td::Slice name, td::BlobView blob);
  TD_WARN_UNUSED_RESULT td::Status add_file(td::Slice name, td::CSlice path);
  td::Result<Torrent> finalize();

 private:
  td::Status init();

  Options options_;
  struct File {
    std::string name;
    td::BlobView data;
  };
  // Registered in insertion order; this order is preserved in the header.
  std::vector<File> files_;
};
} // namespace ton

50
storage/TorrentHeader.cpp Normal file
View file

@ -0,0 +1,50 @@
#include "TorrentHeader.hpp"
#include "td/utils/tl_helpers.h"
namespace ton {
// Returns the (possibly empty) directory name the torrent unpacks into.
td::CSlice TorrentHeader::get_dir_name() const {
  return dir_name;
}
// Returns the number of files described by this header.
td::uint32 TorrentHeader::get_files_count() const {
  return files_count;
}
// Offset (in the whole torrent stream) where file file_i's data starts.
td::uint64 TorrentHeader::get_data_begin(td::uint64 file_i) const {
  return get_data_offset(file_i);
}
// Offset (in the whole torrent stream) one past the end of file file_i's data.
td::uint64 TorrentHeader::get_data_end(td::uint64 file_i) const {
  return get_data_offset(file_i + 1);
}
// Size in bytes of this header once serialized (it occupies the start of the stream).
td::uint64 TorrentHeader::serialization_size() const {
  return td::tl_calc_length(*this);
}
// Maps a boundary index onto an absolute stream offset: boundary 0 is the end
// of the serialized header, boundary i (i >= 1) is the end of file i-1
// (data_index stores cumulative sizes).
td::uint64 TorrentHeader::get_data_offset(td::uint64 offset_i) const {
  auto res = serialization_size();
  if (offset_i != 0) {
    CHECK(offset_i <= files_count);
    res += data_index[offset_i - 1];
  }
  return res;
}
// Serializes the header into an owned buffer using the TL-style layout in store().
td::BufferSlice TorrentHeader::serialize() const {
  return td::BufferSlice(td::serialize(*this));
}
// Size of file file_i's payload: difference of adjacent cumulative offsets
// in data_index (the first entry is already a size).
td::uint64 TorrentHeader::get_data_size(td::uint64 file_i) const {
  if (file_i == 0) {
    return data_index[0];
  }
  return data_index[file_i] - data_index[file_i - 1];
}
// Name of file file_i, sliced out of the concatenated names blob using the
// cumulative name_index offsets.
td::Slice TorrentHeader::get_name(td::uint64 file_i) const {
  CHECK(file_i < files_count);
  auto begin = (file_i == 0) ? 0 : name_index[file_i - 1];
  auto end = name_index[file_i];
  return td::Slice(names).substr(begin, end - begin);
}
} // namespace ton

49
storage/TorrentHeader.h Normal file
View file

@ -0,0 +1,49 @@
#pragma once
#include "td/utils/Slice.h"
#include "td/utils/buffer.h"
namespace ton {
// fec_info_none#c82a1964 = FecInfo;
//
// torrent_header#9128aab7
// files_count:uint32
// tot_name_size:uint64
// tot_data_size:uint64
// fec:FecInfo
// dir_name_size:uint32
// dir_name:(dir_name_size * [uint8])
// name_index:(files_count * [uint64])
// data_index:(files_count * [uint64])
// names:(file_names_size * [uint8])
// data:(tot_data_size * [uint8])
// = TorrentHeader;
// Torrent header: file names plus cumulative indexes that map files onto the
// flat data stream. Wire layout is described by the scheme comment above and
// implemented in TorrentHeader.hpp (store/parse).
struct TorrentHeader {
  td::uint32 files_count{0};
  td::uint64 tot_names_size{0};  // total length of the concatenated `names` blob
  td::uint64 tot_data_size{0};
  //fec_none
  std::string dir_name;
  std::vector<td::uint64> name_index;  // cumulative end offsets into `names`
  std::vector<td::uint64> data_index;  // cumulative end offsets of file payloads
  std::string names;                   // all file names concatenated back to back

  // Absolute offsets of a file's payload inside the torrent stream (the
  // serialized header itself occupies the first serialization_size() bytes).
  td::uint64 get_data_begin(td::uint64 file_i) const;
  td::uint64 get_data_end(td::uint64 file_i) const;
  td::uint64 get_data_offset(td::uint64 offset_i) const;
  td::uint64 get_data_size(td::uint64 file_i) const;
  td::Slice get_name(td::uint64 file_i) const;
  td::CSlice get_dir_name() const;
  td::uint32 get_files_count() const;

  td::uint64 serialization_size() const;
  td::BufferSlice serialize() const;

  // torrent_header magic; written first by store() and checked by parse().
  static constexpr td::uint32 type = 0x9128aab7;

  template <class StorerT>
  void store(StorerT &storer) const;
  template <class ParserT>
  void parse(ParserT &parser);
};
} // namespace ton

62
storage/TorrentHeader.hpp Normal file
View file

@ -0,0 +1,62 @@
#pragma once
#include "td/utils/tl_helpers.h"
#include "TorrentHeader.h"
namespace ton {
// Serializes the header in the fixed torrent_header wire order (see the scheme
// in TorrentHeader.h): magic, counters, fec info, dir name, both cumulative
// indexes, then the concatenated names blob. parse() must mirror this exactly.
template <class StorerT>
void TorrentHeader::store(StorerT &storer) const {
  using td::store;
  store(type, storer);
  store(files_count, storer);
  store(tot_names_size, storer);
  store(tot_data_size, storer);
  // fec_info_none#c82a1964 — the only supported FEC scheme.
  td::uint32 fec_type = 0xc82a1964;
  store(fec_type, storer);
  td::uint32 dir_name_size = td::narrow_cast<td::uint32>(dir_name.size());
  store(dir_name_size, storer);
  storer.store_slice(dir_name);
  // Indexes must be exactly files_count entries each (see finalize()).
  CHECK(name_index.size() == files_count);
  CHECK(data_index.size() == files_count);
  for (auto x : name_index) {
    store(x, storer);
  }
  for (auto x : data_index) {
    store(x, storer);
  }
  CHECK(tot_names_size == names.size());
  storer.store_slice(names);
}
// Deserializes a header written by store(), field by field in the same order.
// Fixes: the magic-mismatch branch reported "Unknown fec type" (copy-paste of
// the fec error); it now names the actual problem. The fec magic is also
// checked immediately after it is read, before fetching the variable-length
// dir_name, so a corrupted stream fails fast.
template <class ParserT>
void TorrentHeader::parse(ParserT &parser) {
  using td::parse;
  td::uint32 got_type;
  parse(got_type, parser);
  if (got_type != type) {
    parser.set_error("Unknown torrent header type");
    return;
  }
  parse(files_count, parser);
  parse(tot_names_size, parser);
  parse(tot_data_size, parser);
  td::uint32 fec_type;
  parse(fec_type, parser);
  // fec_info_none#c82a1964 is the only supported FEC scheme.
  if (fec_type != 0xc82a1964) {
    parser.set_error("Unknown fec type");
    return;
  }
  td::uint32 dir_name_size;
  parse(dir_name_size, parser);
  dir_name = parser.template fetch_string_raw<std::string>(dir_name_size);
  name_index.resize(files_count);
  for (auto &x : name_index) {
    parse(x, parser);
  }
  data_index.resize(files_count);
  for (auto &x : data_index) {
    parse(x, parser);
  }
  names = parser.template fetch_string_raw<std::string>(tot_names_size);
}
} // namespace ton

47
storage/TorrentInfo.cpp Normal file
View file

@ -0,0 +1,47 @@
#include "TorrentInfo.h"
#include "vm/cells/CellString.h"
#include "vm/cellslice.h"
#include "td/utils/misc.h"
namespace ton {
// Packs all fields into a cell builder; returns false if the data does not fit.
// Field order must match unpack().
bool TorrentInfo::pack(vm::CellBuilder &cb) const {
  return cb.store_long_bool(depth, 32) && cb.store_long_bool(piece_size, 32) && cb.store_long_bool(file_size, 64) &&
         cb.store_bits_bool(root_hash) && cb.store_long_bool(header_size, 64) && cb.store_bits_bool(header_hash) &&
         vm::CellText::store(cb, description).is_ok();
}
// Reads all fields from a cell slice in the order written by pack();
// returns false on malformed input.
bool TorrentInfo::unpack(vm::CellSlice &cs) {
  return cs.fetch_uint_to(32, depth) && cs.fetch_uint_to(32, piece_size) && cs.fetch_uint_to(64, file_size) &&
         cs.fetch_bits_to(root_hash) && cs.fetch_uint_to(64, header_size) && cs.fetch_bits_to(header_hash) &&
         vm::CellText::fetch_to(cs, description);
}
// Hash of the packed info cell; requires init_cell() to have been called.
vm::Cell::Hash TorrentInfo::get_hash() const {
  return as_cell()->get_hash();
}
// Builds and caches the packed cell representation; aborts if packing fails.
void TorrentInfo::init_cell() {
  vm::CellBuilder cb;
  CHECK(pack(cb));
  cell_ = cb.finalize();
}
// Returns the cached cell; init_cell() must have been called first.
td::Ref<vm::Cell> TorrentInfo::as_cell() const {
  CHECK(cell_.not_null());
  return cell_;
}
// Total number of pieces: ceil(file_size / piece_size).
td::uint64 TorrentInfo::pieces_count() const {
  auto full_pieces = file_size / piece_size;
  return full_pieces + (file_size % piece_size != 0 ? 1 : 0);
}
// Offset/size of piece piece_i inside the stream; the last piece may be
// shorter than piece_size. Aborts if piece_i is out of range.
TorrentInfo::PieceInfo TorrentInfo::get_piece_info(td::uint64 piece_i) const {
  PieceInfo res;
  res.offset = static_cast<td::uint64>(piece_size) * piece_i;
  CHECK(res.offset < file_size);
  res.size = td::min(static_cast<td::uint64>(piece_size), file_size - res.offset);
  return res;
}
} // namespace ton

38
storage/TorrentInfo.h Normal file
View file

@ -0,0 +1,38 @@
#pragma once
#include "td/utils/Slice.h"
#include "td/utils/UInt.h"
#include "vm/cells.h"
namespace ton {
// torrent_info depth:# piece_size:uint32 file_size:uint64 root_hash:(## 256) header_size:uint64 header_hash:(## 256) description:Text = TorrentInfo;
// Immutable torrent description: sizes, piece layout, merkle root and header
// hash. pack()/unpack() define its cell serialization (see the scheme above).
// Fix: header_size was the only member without a default initializer, so a
// default-constructed TorrentInfo carried an indeterminate value.
struct TorrentInfo {
  td::uint32 depth{0};
  td::uint32 piece_size{768 * 128};  // default piece size
  td::uint64 file_size{0};           // header + all file payloads
  td::Bits256 root_hash;             // merkle root over piece hashes
  td::uint64 header_size{0};
  td::Bits256 header_hash;
  std::string description;

  bool pack(vm::CellBuilder &cb) const;
  bool unpack(vm::CellSlice &cs);

  // Builds the cached cell; must be called before as_cell()/get_hash().
  void init_cell();
  vm::Cell::Hash get_hash() const;
  td::Ref<vm::Cell> as_cell() const;

  // Offset/size of one piece inside the stream.
  struct PieceInfo {
    td::uint64 offset;
    td::uint64 size;
  };

  td::uint64 pieces_count() const;
  PieceInfo get_piece_info(td::uint64 piece_i) const;

 private:
  td::Ref<vm::Cell> cell_;
};
} // namespace ton

123
storage/TorrentMeta.cpp Normal file
View file

@ -0,0 +1,123 @@
#include "TorrentMeta.h"
#include "TorrentHeader.hpp"
#include "td/utils/crypto.h"
#include "td/utils/tl_helpers.h"
#include "td/utils/UInt.h"
#include "vm/boc.h"
#include "vm/cells/MerkleProof.h"
#include "vm/cellslice.h"
namespace ton {
// Parses a serialized TorrentMeta and cross-checks the optional parts:
// the header (if present) must hash to info.header_hash, and the root proof
// (if present) must virtualize to a cell whose hash equals info.root_hash.
td::Result<TorrentMeta> TorrentMeta::deserialize(td::Slice data) {
  TorrentMeta res;
  TRY_STATUS(td::unserialize(res, data));
  if (res.header) {
    td::Bits256 header_hash;
    td::sha256(td::serialize(res.header.value()), header_hash.as_slice());
    if (header_hash != res.info.header_hash) {
      return td::Status::Error("Header hash mismatch");
    }
  } else {
    // NOTE(review): a missing header is a valid state per the serialization
    // flags — this looks like leftover debug output; consider lowering it.
    LOG(ERROR) << "NO HEADER";
  }
  if (res.root_proof.not_null()) {
    auto root = vm::MerkleProof::virtualize(res.root_proof, 1);
    if (root.is_null()) {
      return td::Status::Error("Root proof is not a merkle proof");
    }
    if (root->get_hash().as_slice() != res.info.root_hash.as_slice()) {
      return td::Status::Error("Root proof hash mismatch");
    }
  }
  // Rebuild the cached info cell so get_hash()/as_cell() work afterwards.
  res.info.init_cell();
  return res;
}
// Serializes this meta using the store() layout below.
std::string TorrentMeta::serialize() const {
  return td::serialize(*this);
}
// Serializes: flags, payload sizes, the info boc, then (per flags) the root
// proof boc and the header. Flag bit 0 = root proof present, bit 1 = header
// present. Sizes precede payloads so parse() can pre-read them.
template <class StorerT>
void TorrentMeta::store(StorerT &storer) const {
  using td::store;
  td::uint32 flags = 0;
  if (root_proof.not_null()) {
    flags |= 1;
  }
  if (header) {
    flags |= 2;
  }
  store(flags, storer);
  // Both boc serializations are of locally-built cells, so failure is a bug.
  const auto info_boc = vm::std_boc_serialize(info.as_cell()).move_as_ok();
  td::uint32 info_boc_size = td::narrow_cast<td::uint32>(info_boc.size());
  td::BufferSlice root_proof_boc;
  td::uint32 root_proof_boc_size{0};
  if ((flags & 1) != 0) {
    root_proof_boc = vm::std_boc_serialize(root_proof).move_as_ok();
    root_proof_boc_size = td::narrow_cast<td::uint32>(root_proof_boc.size());
  }
  store(info_boc_size, storer);
  if ((flags & 1) != 0) {
    store(root_proof_boc_size, storer);
  }
  storer.store_slice(info_boc.as_slice());
  if ((flags & 1) != 0) {
    storer.store_slice(root_proof_boc.as_slice());
  }
  if ((flags & 2) != 0) {
    store(header.value(), storer);
  }
}
// Deserializes the layout written by store(): flags, sizes, info boc, then
// the optional root proof boc and optional header, gated by the same flags.
template <class ParserT>
void TorrentMeta::parse(ParserT &parser) {
  using td::parse;
  td::uint32 flags;
  parse(flags, parser);
  td::uint32 info_boc_size;
  td::uint32 root_proof_boc_size;  // only read/used when flag bit 0 is set
  parse(info_boc_size, parser);
  if ((flags & 1) != 0) {
    parse(root_proof_boc_size, parser);
  }
  auto info_boc_str = parser.template fetch_string_raw<std::string>(info_boc_size);
  auto r_info_cell = vm::std_boc_deserialize(info_boc_str);
  if (r_info_cell.is_error()) {
    parser.set_error(r_info_cell.error().to_string());
    return;
  }
  if ((flags & 1) != 0) {
    auto root_proof_str = parser.template fetch_string_raw<std::string>(root_proof_boc_size);
    auto r_root_proof = vm::std_boc_deserialize(root_proof_str);
    if (r_root_proof.is_error()) {
      parser.set_error(r_root_proof.error().to_string());
      return;
    }
    root_proof = r_root_proof.move_as_ok();
  }
  auto cs = vm::load_cell_slice(r_info_cell.move_as_ok());
  if (!info.unpack(cs)) {
    parser.set_error("Failed to parse TorrentInfo");
    return;
  }
  if ((flags & 2) != 0) {
    TorrentHeader new_header;
    parse(new_header, parser);
    header = std::move(new_header);
  }
}
} // namespace ton

37
storage/TorrentMeta.h Normal file
View file

@ -0,0 +1,37 @@
#pragma once
#include "TorrentHeader.h"
#include "TorrentInfo.h"
#include "td/utils/optional.h"
namespace ton {
//
// torrent_file#6a7181e0 flags:(## 32) info_boc_size:uint32
// root_proof_boc_size:flags.0?uint32
// info_boc:(info_boc_size * [uint8])
// root_proof_boc:flags.0?(root_proof_boc_size * [uint8])
// header:flags.1?TorrentHeader = TorrentMeta;
//
// Serializable torrent metadata: the mandatory info plus an optional merkle
// root proof and an optional header (see the scheme comment above).
struct TorrentMeta {
  TorrentMeta() = default;
  explicit TorrentMeta(TorrentInfo info, td::Ref<vm::Cell> root_proof = {}, td::optional<TorrentHeader> header = {})
      : info(std::move(info)), root_proof(std::move(root_proof)), header(std::move(header)) {
  }
  TorrentInfo info;
  td::Ref<vm::Cell> root_proof;          // optional merkle proof of the root hash
  td::optional<TorrentHeader> header;    // optional full header

  static td::Result<TorrentMeta> deserialize(td::Slice data);
  std::string serialize() const;

  // torrent_file magic from the scheme above.
  // NOTE(review): store() in TorrentMeta.cpp begins with `flags`, not this
  // magic — the constant appears unused by the visible (de)serializers; verify
  // against callers before relying on it being on the wire.
  static constexpr td::uint64 type = 0x6a7181e0;

  template <class StorerT>
  void store(StorerT &storer) const;
  template <class ParserT>
  void parse(ParserT &parser);
};
} // namespace ton

852
storage/storage-cli.cpp Normal file
View file

@ -0,0 +1,852 @@
#include "adnl/adnl.h"
#include "common/bigint.hpp"
#include "common/bitstring.h"
#include "dht/dht.h"
#include "keys/encryptor.h"
#include "overlay/overlay.h"
#include "rldp/rldp.h"
#include "rldp2/rldp.h"
#include "td/utils/JsonBuilder.h"
#include "td/utils/port/signals.h"
#include "td/utils/Parser.h"
#include "td/utils/overloaded.h"
#include "td/utils/OptionsParser.h"
#include "td/utils/PathView.h"
#include "td/utils/Random.h"
#include "td/utils/misc.h"
#include "td/utils/filesystem.h"
#include "td/utils/port/path.h"
#include "td/actor/actor.h"
#include "td/actor/MultiPromise.h"
#include "terminal/terminal.h"
#include "Torrent.h"
#include "TorrentCreator.h"
#include "NodeActor.h"
#include "auto/tl/ton_api_json.h"
#include <iostream>
#include <limits>
#include <map>
#include <set>
namespace ton_rldp = ton::rldp2;
// Command-line options for storage-cli.
struct StorageCliOptions {
  std::string config;             // path to the global network config (json); empty = offline
  bool enable_readline{true};
  std::string db_root{"dht-db/"};
  td::IPAddress addr;             // local address for ADNL
  td::optional<std::string> cmd;  // one-shot command: run it and exit instead of the REPL
};
using AdnlCategory = td::int32;
// Actor that bridges torrent peers onto the ADNL/overlay network: maps
// AdnlNodeIdShort identities to small integer PeerIds, routes RLDP queries
// between NodeActor/PeerActor instances, and manages overlay membership.
class PeerManager : public td::actor::Actor {
 public:
  PeerManager(ton::adnl::AdnlNodeIdShort adnl_id, ton::overlay::OverlayIdFull overlay_id,
              td::actor::ActorId<ton::overlay::Overlays> overlays, td::actor::ActorId<ton::adnl::Adnl> adnl,
              td::actor::ActorId<ton_rldp::Rldp> rldp)
      : adnl_id_(std::move(adnl_id))
      , overlay_id_(std::move(overlay_id))
      , overlays_(std::move(overlays))
      , adnl_(std::move(adnl))
      , rldp_(std::move(rldp)) {
    // Our own ADNL id must be registered first so it always gets PeerId 1.
    CHECK(register_adnl_id(adnl_id_) == 1);
  }
  // Joins the public overlay with a no-op callback (the overlay is used only
  // for peer discovery; actual queries travel over RLDP).
  void start_up() override {
    // TODO: forbid broadcasts?
    auto rules = ton::overlay::OverlayPrivacyRules{ton::overlay::Overlays::max_fec_broadcast_size()};
    class Callback : public ton::overlay::Overlays::Callback {
     public:
      void receive_message(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id,
                           td::BufferSlice data) override {
      }
      void receive_query(ton::adnl::AdnlNodeIdShort src, ton::overlay::OverlayIdShort overlay_id, td::BufferSlice data,
                         td::Promise<td::BufferSlice> promise) override {
      }
      void receive_broadcast(ton::PublicKeyHash src, ton::overlay::OverlayIdShort overlay_id,
                             td::BufferSlice data) override {
      }
    };
    send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay, adnl_id_, overlay_id_.clone(),
                 std::make_unique<Callback>(), rules);
  }
  void tear_down() override {
    send_closure(overlays_, &ton::overlay::Overlays::delete_overlay, adnl_id_, overlay_id_.compute_short_id());
  }
  // Sends a torrent query from src to dst over RLDP, prefixed with the
  // overlay id so the receiving side can demultiplex it.
  void send_query(ton::PeerId src, ton::PeerId dst, td::BufferSlice query, td::Promise<td::BufferSlice> promise) {
    TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
    TRY_RESULT_PROMISE(promise, dst_id, peer_to_andl(dst));
    query = ton::create_serialize_tl_object_suffix<ton::ton_api::storage_queryPrefix>(
        std::move(query), overlay_id_.compute_short_id().bits256_value());
    send_closure(rldp_, &ton_rldp::Rldp::send_query_ex, src_id, dst_id, "", std::move(promise), td::Timestamp::in(10),
                 std::move(query), 1 << 25);
  }
  // Dispatches an incoming query to the matching PeerActor, creating it via
  // the destination NodeActor when this is the first query from that peer.
  void execute_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
                     td::Promise<td::BufferSlice> promise) {
    // Strip the storage_queryPrefix (4-byte tl id + 32-byte overlay id).
    data = data.from_slice(data.as_slice().substr(4 + 32));
    auto src_id = register_adnl_id(src);
    auto dst_id = register_adnl_id(dst);
    auto it = peers_.find(std::make_pair(dst_id, src_id));
    if (it == peers_.end()) {
      auto node_it = nodes_.find(dst_id);
      if (node_it == nodes_.end()) {
        LOG(ERROR) << "Unknown query destination";
        promise.set_error(td::Status::Error("Unknown query destination"));
        return;
      }
      if (!node_it->second.is_alive()) {
        LOG(ERROR) << "Expired query destination";
        promise.set_error(td::Status::Error("Unknown query destination"));
        return;
      }
      send_closure(node_it->second, &ton::NodeActor::start_peer, src_id,
                   [promise = std::move(promise),
                    data = std::move(data)](td::Result<td::actor::ActorId<ton::PeerActor>> r_peer) mutable {
                     TRY_RESULT_PROMISE(promise, peer, std::move(r_peer));
                     send_closure(peer, &ton::PeerActor::execute_query, std::move(data), std::move(promise));
                   });
      return;
    }
    send_closure(it->second, &ton::PeerActor::execute_query, std::move(data), std::move(promise));
  }
  // Registration/unregistration below keeps a per-src subscription refcount so
  // the ADNL prefix subscription lives exactly as long as someone needs it.
  void register_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId<ton::PeerActor> peer) {
    peers_[std::make_pair(src, dst)] = std::move(peer);
    register_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
  }
  void register_node(ton::PeerId src, td::actor::ActorId<ton::NodeActor> node) {
    nodes_[src] = std::move(node);
    register_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
  }
  void unregister_node(ton::PeerId src, td::actor::ActorId<ton::NodeActor> node) {
    auto it = nodes_.find(src);
    CHECK(it != nodes_.end());
    if (it->second == node) {
      nodes_.erase(it);
    }
    unregister_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
  }
  void unregister_peer(ton::PeerId src, ton::PeerId dst, td::actor::ActorId<ton::PeerActor> peer) {
    auto it = peers_.find(std::make_pair(src, dst));
    CHECK(it != peers_.end());
    if (it->second == peer) {
      peers_.erase(it);
    }
    unregister_src(src, [](td::Result<td::Unit> res) { res.ensure(); });
  }
  void unregister_src(ton::PeerId src, td::Promise<td::Unit> promise) {
    TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
    if (--subscribed_peers_[src] == 0) {
      LOG(ERROR) << "Unsubscribe " << src_id;
      subscribed_peers_.erase(src);
      send_closure(adnl_, &ton::adnl::Adnl::unsubscribe, src_id,
                   ton::create_serialize_tl_object<ton::ton_api::storage_queryPrefix>(
                       overlay_id_.compute_short_id().bits256_value())
                       .as_slice()
                       .str());
    }
    promise.set_value({});
  }
  void register_src(ton::PeerId src, td::Promise<td::Unit> promise) {
    TRY_RESULT_PROMISE(promise, src_id, peer_to_andl(src));
    // Forwards incoming prefix-matched ADNL queries back into execute_query().
    class Callback : public ton::adnl::Adnl::Callback {
     public:
      Callback(td::actor::ActorId<PeerManager> peer_manager) : peer_manager_(std::move(peer_manager)) {
      }
      void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst,
                           td::BufferSlice data) override {
      }
      void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
                         td::Promise<td::BufferSlice> promise) override {
        send_closure(peer_manager_, &PeerManager::execute_query, std::move(src), std::move(dst), std::move(data),
                     std::move(promise));
      }

     private:
      td::actor::ActorId<PeerManager> peer_manager_;
    };
    if (subscribed_peers_[src]++ == 0) {
      LOG(ERROR) << "Subscribe " << src_id;
      send_closure(adnl_, &ton::adnl::Adnl::subscribe, src_id,
                   ton::create_serialize_tl_object<ton::ton_api::storage_queryPrefix>(
                       overlay_id_.compute_short_id().bits256_value())
                       .as_slice()
                       .str(),
                   std::make_unique<Callback>(actor_id(this)));
    }
    promise.set_value({});
  }
  // Resolves a PeerId back to its ADNL id. PeerIds start at 1.
  // NOTE(review): "andl" looks like a typo for "adnl"; kept to avoid breaking callers.
  td::Result<ton::adnl::AdnlNodeIdShort> peer_to_andl(ton::PeerId id) {
    if (id <= 0 || id > adnl_ids_.size()) {
      return td::Status::Error(PSLICE() << "Invalid peer id " << id);
    }
    return adnl_ids_[id - 1];
  }
  // Returns the existing PeerId for this ADNL id, allocating a new one on first use.
  ton::PeerId register_adnl_id(ton::adnl::AdnlNodeIdShort id) {
    auto it = adnl_to_peer_id_.emplace(id, next_peer_id_);
    if (it.second) {
      LOG(ERROR) << "Register AndlId " << id << " -> " << it.first->second;
      adnl_ids_.push_back(id);
      next_peer_id_++;
    }
    return it.first->second;
  }
  // Asks the overlay for up to 30 random peers and maps them to PeerIds.
  void get_peers(td::Promise<std::vector<ton::PeerId>> promise) {
    send_closure(overlays_, &ton::overlay::Overlays::get_overlay_random_peers, adnl_id_, overlay_id_.compute_short_id(),
                 30, promise.send_closure(actor_id(this), &PeerManager::got_overlay_random_peers));
  }

 private:
  ton::adnl::AdnlNodeIdShort adnl_id_;
  ton::overlay::OverlayIdFull overlay_id_;
  td::actor::ActorId<ton::overlay::Overlays> overlays_;
  td::actor::ActorId<ton::adnl::Adnl> adnl_;
  td::actor::ActorId<ton_rldp::Rldp> rldp_;
  std::map<std::pair<ton::PeerId, ton::PeerId>, td::actor::ActorId<ton::PeerActor>> peers_;  // (src, dst) -> actor
  std::map<ton::PeerId, td::actor::ActorId<ton::NodeActor>> nodes_;
  ton::PeerId next_peer_id_{1};
  std::map<ton::adnl::AdnlNodeIdShort, ton::PeerId> adnl_to_peer_id_;
  std::vector<ton::adnl::AdnlNodeIdShort> adnl_ids_;  // PeerId i lives at index i - 1
  std::map<ton::PeerId, td::uint32> subscribed_peers_;  // per-src subscription refcount

  void got_overlay_random_peers(td::Result<std::vector<ton::adnl::AdnlNodeIdShort>> r_peers,
                                td::Promise<std::vector<ton::PeerId>> promise) {
    TRY_RESULT_PROMISE(promise, peers, std::move(r_peers));
    std::vector<ton::PeerId> res;
    for (auto peer : peers) {
      res.push_back(register_adnl_id(peer));
    }
    promise.set_value(std::move(res));
  }
};
class StorageCli : public td::actor::Actor {
public:
explicit StorageCli(StorageCliOptions options) : options_(std::move(options)) {
}
private:
StorageCliOptions options_;
td::actor::ActorOwn<td::TerminalIO> io_;
//td::actor::ActorOwn<DhtServer> dht_server_;
std::shared_ptr<ton::dht::DhtGlobalConfig> dht_config_;
td::actor::ActorOwn<ton::keyring::Keyring> keyring_;
td::actor::ActorOwn<ton::adnl::AdnlNetworkManager> adnl_network_manager_;
td::actor::ActorOwn<ton::adnl::Adnl> adnl_;
td::actor::ActorOwn<ton::dht::Dht> dht_;
td::actor::ActorOwn<ton::overlay::Overlays> overlays_;
td::actor::ActorOwn<ton_rldp::Rldp> rldp_;
//ton::PublicKeyHash default_dht_node_ = ton::PublicKeyHash::zero();
ton::PublicKey public_key_;
bool one_shot_{false};
bool is_closing_{false};
td::uint32 ref_cnt_{1};
td::Status load_global_config() {
TRY_RESULT_PREFIX(conf_data, td::read_file(options_.config), "failed to read: ");
TRY_RESULT_PREFIX(conf_json, td::json_decode(conf_data.as_slice()), "failed to parse json: ");
ton::ton_api::config_global conf;
TRY_STATUS_PREFIX(ton::ton_api::from_json(conf, conf_json.get_object()), "json does not fit TL scheme: ");
// TODO
// add adnl static nodes
//if (conf.adnl_) {
// td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config,
// std::move(conf.adnl_->static_nodes_));
//}
if (!conf.dht_) {
return td::Status::Error(ton::ErrorCode::error, "does not contain [dht] section");
}
TRY_RESULT_PREFIX(dht, ton::dht::Dht::create_global_config(std::move(conf.dht_)), "bad [dht] section: ");
dht_config_ = std::move(dht);
return td::Status::OK();
}
void start_up() override {
class Cb : public td::TerminalIO::Callback {
public:
void line_cb(td::BufferSlice line) override {
td::actor::send_closure(id_, &StorageCli::parse_line, std::move(line));
}
Cb(td::actor::ActorShared<StorageCli> id) : id_(std::move(id)) {
}
private:
td::actor::ActorShared<StorageCli> id_;
};
if (options_.cmd) {
one_shot_ = true;
td::actor::send_closure(actor_id(this), &StorageCli::parse_line, td::BufferSlice(options_.cmd.unwrap()));
} else {
ref_cnt_++;
io_ = td::TerminalIO::create("> ", options_.enable_readline, std::make_unique<Cb>(actor_shared(this)));
td::actor::send_closure(io_, &td::TerminalIO::set_log_interface);
}
if (!options_.config.empty()) {
init_network();
}
}
// Bring up the networking stack: ADNL, RLDP, DHT and overlays.
// Requires a valid global config (load_global_config() must succeed) and
// aborts via .ensure() otherwise. Reuses a persisted node key from
// <db_root>/key.pub or generates and saves a fresh one.
void init_network() {
  load_global_config().ensure();
  td::mkdir(options_.db_root).ignore();  // best effort: the directory may already exist
  keyring_ = ton::keyring::Keyring::create(options_.db_root + "/keyring");
  adnl_network_manager_ = ton::adnl::AdnlNetworkManager::create(td::narrow_cast<td::int16>(options_.addr.get_port()));
  adnl_ = ton::adnl::Adnl::create(options_.db_root, keyring_.get());
  td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_network_manager, adnl_network_manager_.get());
  rldp_ = ton_rldp::Rldp::create(adnl_.get());
  auto key_path = options_.db_root + "/key.pub";
  auto r_public_key = td::read_file(key_path).move_fmap([](auto raw) { return ton::PublicKey::import(raw); });
  if (r_public_key.is_error()) {
    // no usable key on disk: create one, add it to the keyring and persist the public part
    auto private_key = ton::PrivateKey(ton::privkeys::Ed25519::random());
    public_key_ = private_key.compute_public_key();
    td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(private_key), false,
                            td::Promise<td::Unit>([key_path, str = public_key_.export_as_slice()](auto) {
                              td::write_file(key_path, str.as_slice()).ensure();
                              LOG(INFO) << "New key was saved";
                            }));
  } else {
    public_key_ = r_public_key.move_as_ok();
  }
  auto short_id = public_key_.compute_short_id();
  LOG(ERROR) << "Create " << short_id;
  // Register our address under several ADNL categories.
  std::set<AdnlCategory> cats;
  cats = {0, 1, 2, 3};
  ton::adnl::AdnlCategoryMask cat_mask;
  for (auto cat : cats) {
    cat_mask[cat] = true;
  }
  // NOTE(review): `cats.size() ? 0 : 1` resolves to 0 whenever cats is
  // non-empty — confirm against AdnlNetworkManager::add_self_addr semantics.
  td::actor::send_closure(adnl_network_manager_, &ton::adnl::AdnlNetworkManager::add_self_addr, options_.addr,
                          std::move(cat_mask), cats.size() ? 0 : 1);
  td::uint32 ts = static_cast<td::uint32>(td::Clocks::system());
  std::map<td::uint32, ton::adnl::AdnlAddressList> addr_lists_;
  for (auto cat : cats) {
    CHECK(cat >= 0);
    ton::adnl::AdnlAddress x = ton::adnl::AdnlAddressImpl::create(
        ton::create_tl_object<ton::ton_api::adnl_address_udp>(options_.addr.get_ipv4(), options_.addr.get_port()));
    addr_lists_[cat].add_addr(std::move(x));
    addr_lists_[cat].set_version(ts);
    addr_lists_[cat].set_reinit_date(ton::adnl::Adnl::adnl_start_time());
  }
  for (auto cat : cats) {
    td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{public_key_}, addr_lists_[cat],
                            static_cast<td::uint8>(cat));
    td::actor::send_closure(rldp_, &ton_rldp::Rldp::add_id,
                            ton::adnl::AdnlNodeIdFull{public_key_}.compute_short_id());
  }
  dht_ =
      ton::dht::Dht::create(ton::adnl::AdnlNodeIdShort{short_id}, "" /*NO db for dht! No wrong cache - no problems*/,
                            dht_config_, keyring_.get(), adnl_.get())
          .move_as_ok();
  send_closure(adnl_, &ton::Adnl::register_dht_node, dht_.get());
  overlays_ = ton::overlay::Overlays::create(options_.db_root, keyring_.get(), adnl_.get(), dht_.get());
}
// In one-shot (-E) mode, terminate the whole process as soon as the command
// has finished; exit code 0 on success, 2 on failure. No-op otherwise.
void exit(td::Result<td::Unit> res) {
  if (!one_shot_) {
    return;
  }
  td::TerminalIO::out() << "Done, exiting";
  std::_Exit(res.is_ok() ? 0 : 2);
}
// Parse and dispatch one console command line.
// `cmd_promise` reports success/failure of the command back to the user and,
// in one-shot mode, terminates the process via StorageCli::exit. Handlers
// that complete asynchronously move the promise out; anything left in
// `cmd_promise` at the end is resolved immediately.
void parse_line(td::BufferSlice line) {
  if (is_closing_) {
    return;
  }
  td::ConstParser parser(line.as_slice());
  auto cmd = parser.read_word();
  if (cmd.empty()) {
    return;
  }
  td::Promise<td::Unit> cmd_promise = [line = line.clone(), timer = td::PerfWarningTimer(line.as_slice().str()),
                                       cli = actor_id(this)](td::Result<td::Unit> res) {
    if (res.is_ok()) {
      // on_ok
    } else {
      td::TerminalIO::out() << "Query {" << line.as_slice() << "} FAILED: \n\t" << res.error() << "\n";
    }
    send_closure(cli, &StorageCli::exit, std::move(res));
  };
  if (cmd == "help") {
    td::TerminalIO::out() << "help\tThis help\n";
    td::TerminalIO::out() << "create <dir/file>\tCreate torrent from a directory\n";
    td::TerminalIO::out() << "info <id>\tPrint info about loaded torrent\n";
    td::TerminalIO::out() << "load <file>\tLoad torrent file in memory\n";
    td::TerminalIO::out() << "save <id> <file>\tSave torrent file\n";
    td::TerminalIO::out() << "start <id>\tStart torrent downloading/uploading\n";
    td::TerminalIO::out() << "seed <id>\tStart torrent uploading\n";
    td::TerminalIO::out() << "download <id>\tStart torrent and stop when it is completed\n";
    td::TerminalIO::out() << "stop <id>\tStop torrent downloading\n";
    td::TerminalIO::out() << "pause <id>\tPause active torrent downloading\n";
    td::TerminalIO::out() << "resume <id>\tResume active torrent downloading\n";
    td::TerminalIO::out() << "priority <id> <file_id> <priority>\tSet file priority(0..254) by file_id, use "
                             "file_id=* to set priority for all files\n";
    td::TerminalIO::out() << "exit\tExit\n";
    td::TerminalIO::out() << "quit\tExit\n";
  } else if (cmd == "exit" || cmd == "quit") {
    quit();
  } else if (cmd == "create") {
    torrent_create(parser.read_all(), std::move(cmd_promise));
  } else if (cmd == "info") {
    torrent_info(parser.read_all(), std::move(cmd_promise));
  } else if (cmd == "load") {
    cmd_promise.set_result(torrent_load(parser.read_all()).move_map([](auto &&x) { return td::Unit(); }));
  } else if (cmd == "save") {
    auto id = parser.read_word();
    parser.skip_whitespaces();
    auto file = parser.read_all();
    torrent_save(id, file, std::move(cmd_promise));
  } else if (cmd == "start") {
    auto id = parser.read_word();
    torrent_start(id, false, true, std::move(cmd_promise));
  } else if (cmd == "download") {
    // wait_download = true: promise resolves only when the torrent completes
    auto id = parser.read_word();
    torrent_start(id, true, true, std::move(cmd_promise));
  } else if (cmd == "seed") {
    // should_download = false: upload only
    auto id = parser.read_word();
    torrent_start(id, false, false, std::move(cmd_promise));
  } else if (cmd == "stop") {
    auto id = parser.read_word();
    torrent_stop(id, std::move(cmd_promise));
  } else if (cmd == "pause") {
    auto id = parser.read_word();
    torrent_set_should_download(id, false, std::move(cmd_promise));
  } else if (cmd == "resume") {
    auto id = parser.read_word();
    torrent_set_should_download(id, true, std::move(cmd_promise));
  } else if (cmd == "priority") {
    torrent_set_priority(parser, std::move(cmd_promise));
  } else if (cmd == "get") {
    auto name = parser.read_word().str();
    auto key = ton::dht::DhtKey(public_key_.compute_short_id(), name, 0);
    send_closure(dht_, &ton::dht::Dht::get_value, std::move(key), cmd_promise.wrap([](auto &&res) {
      LOG(ERROR) << to_string(res.tl());
      return td::Unit();
    }));
  } else if (cmd == "set") {
    auto name = parser.read_word().str();
    parser.skip_whitespaces();
    auto value = parser.read_all().str();
    auto key = ton::dht::DhtKey(public_key_.compute_short_id(), name, 0);
    auto dht_update_rule = ton::dht::DhtUpdateRuleSignature::create().move_as_ok();
    ton::dht::DhtKeyDescription dht_key_description{key.clone(), public_key_, dht_update_rule, td::BufferSlice()};
    // sign the key description first; dht_set1/dht_set2 continue the chain
    auto to_sign = dht_key_description.to_sign();
    send_closure(keyring_, &ton::keyring::Keyring::sign_message, public_key_.compute_short_id(), std::move(to_sign),
                 cmd_promise.send_closure(actor_id(this), &StorageCli::dht_set1, std::move(dht_key_description),
                                          td::BufferSlice(value)));
  } else {
    // fix: error message previously read "Unkwnown"
    cmd_promise.set_error(td::Status::Error(PSLICE() << "Unkwnown query `" << cmd << "`"));
  }
  // commands that did not take ownership of the promise are done now
  if (cmd_promise) {
    cmd_promise.set_value(td::Unit());
  }
}
// Step 2 of the `set` command: attach the keyring signature to the DHT key
// description, build the DHT value with a one-hour TTL, and ask the keyring
// to sign the value itself (continued in dht_set2).
void dht_set1(ton::dht::DhtKeyDescription dht_key_description, td::BufferSlice value,
              td::Result<td::BufferSlice> r_signature, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, signature, std::move(r_signature));
  dht_key_description.update_signature(std::move(signature));
  dht_key_description.check().ensure();
  // value expires one hour from now
  auto ttl = static_cast<td::uint32>(td::Clocks::system() + 3600);
  ton::dht::DhtValue dht_value{dht_key_description.clone(), std::move(value), ttl, td::BufferSlice("")};
  auto to_sing = dht_value.to_sign();
  send_closure(keyring_, &ton::keyring::Keyring::sign_message, public_key_.compute_short_id(), std::move(to_sing),
               promise.send_closure(actor_id(this), &StorageCli::dht_set2, std::move(dht_value)));
}
// Final step of the `set` command: attach the value signature and store the
// signed value in the DHT; the promise resolves when the DHT confirms.
void dht_set2(ton::dht::DhtValue dht_value, td::Result<td::BufferSlice> r_signature, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, signature, std::move(r_signature));
  dht_value.update_signature(std::move(signature));
  dht_value.check().ensure();
  send_closure(dht_, &ton::dht::Dht::set_value, std::move(dht_value), promise.wrap([](auto &&res) {
    LOG(ERROR) << "OK";
    return td::Unit();
  }));
}
// Monotonically increasing id handed out to each created/loaded torrent.
td::uint32 torrent_id_{0};
// Per-torrent bookkeeping. `torrent` holds the Torrent object only while the
// torrent is NOT started; once a NodeActor owns it, `torrent` is empty and
// `node` is set (the object comes back through got_torrent()).
struct Info {
  td::uint32 id;            // id assigned from torrent_id_
  td::Bits256 hash;         // torrent header hash, used for duplicate detection
  td::optional<ton::Torrent> torrent;
  td::actor::ActorOwn<PeerManager> peer_manager;
  td::actor::ActorOwn<ton::NodeActor> node;
};
std::map<td::uint32, Info> infos_;
// `create` command: build a torrent from a file or directory path with a
// fixed 128 KiB piece size and register it under a fresh id. Rejects
// duplicates by header hash.
void torrent_create(td::Slice path_raw, td::Promise<td::Unit> promise) {
  auto path = td::trim(path_raw).str();
  ton::Torrent::Creator::Options options;
  options.piece_size = 128 * 1024;
  TRY_RESULT_PROMISE(promise, torrent, ton::Torrent::Creator::create_from_path(options, path));
  auto hash = torrent.get_info().header_hash;
  // refuse to register the same torrent twice
  for (auto &it : infos_) {
    if (it.second.hash == hash) {
      promise.set_error(td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")"));
      return;
    }
  }
  td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n";
  infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn<PeerManager>(),
                                   td::actor::ActorOwn<ton::NodeActor>()});
  torrent_id_++;
  promise.set_value(td::Unit());
}
// Resolve a textual torrent id to the Torrent object owned by this cli.
// Fix: the previous version returned OK(nullptr) when the torrent was owned
// by an active NodeActor, and callers (e.g. torrent_save) dereferenced the
// result without a null check. Return an explicit error instead.
td::Result<ton::Torrent *> to_torrent(td::Slice id_raw) {
  TRY_RESULT(id, td::to_integer_safe<td::uint32>(td::trim(id_raw)));
  auto it = infos_.find(id);
  if (it == infos_.end()) {
    return td::Status::Error(PSLICE() << "Invalid torrent id <" << id_raw << ">");
  }
  if (it->second.torrent) {
    return &it->second.torrent.value();
  }
  return td::Status::Error(PSLICE() << "Torrent <" << id_raw << "> is active; stop it first");
}
// Look up the per-torrent Info slot by its textual id; error if unknown.
td::Result<Info *> to_info(td::Slice id_raw) {
  TRY_RESULT(torrent_id, td::to_integer_safe<td::uint32>(td::trim(id_raw)));
  auto found = infos_.find(torrent_id);
  if (found == infos_.end()) {
    return td::Status::Error(PSLICE() << "Invalid torrent id <" << id_raw << ">");
  }
  return &found->second;
}
// Interpret the argument as a torrent id first; if that fails, treat it as a
// path to a torrent file and load it from disk.
td::Result<Info *> to_info_or_load(td::Slice id_raw) {
  auto as_id = to_info(id_raw);
  if (as_id.is_ok()) {
    return as_id.ok();
  }
  return torrent_load(id_raw);
}
// `info` command: print torrent stats. When the Torrent object is owned by
// this cli, print directly; otherwise ask the running NodeActor for them.
void torrent_info(td::Slice id_raw, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, info, to_info(id_raw));
  if (!info->torrent) {
    send_closure(info->node, &ton::NodeActor::get_stats_str, promise.wrap([](std::string stats) {
      td::TerminalIO::out() << stats;
      return td::Unit();
    }));
    return;
  }
  td::TerminalIO::out() << info->torrent.value().get_stats_str();
  promise.set_value(td::Unit());
}
// Spawn a PeerManager joining the overlay network derived from the torrent
// hash; peers of the same torrent meet in this overlay.
td::actor::ActorOwn<PeerManager> create_peer_manager(vm::Cell::Hash hash) {
  ton::overlay::OverlayIdFull overlay_id{td::BufferSlice(hash.as_slice())};
  ton::adnl::AdnlNodeIdShort local_id{public_key_.compute_short_id()};
  return td::actor::create_actor<PeerManager>("PeerManager", local_id, std::move(overlay_id), overlays_.get(),
                                              adnl_.get(), rldp_.get());
}
// Start a torrent: ensure a PeerManager exists for its overlay and hand the
// Torrent object over to a freshly spawned NodeActor.
//   wait_download:   resolve `promise` only once the download completes.
//   should_download: false => seed/upload only.
// Fix: the old code executed `promise.release().release();` before the final
// `if (promise)` check — leaking the promise and making the check dead, so in
// one-shot (-E) mode `start`/`seed` never resolved. The leak is removed.
void torrent_start(td::Slice id_raw, bool wait_download, bool should_download, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, ptr, to_info_or_load(id_raw));
  if (!ptr->torrent) {
    // the Torrent object is already owned by a NodeActor
    promise.set_error(td::Status::Error("torrent is already started"));
    return;
  }
  if (ptr->peer_manager.empty()) {
    ptr->peer_manager = create_peer_manager(ptr->torrent.value().get_info().get_hash());
  }
  ton::PeerId self_id = 1;
  // Bridges NodeActor callbacks to the PeerManager and back to StorageCli.
  class Context : public ton::NodeActor::Callback {
   public:
    Context(td::actor::ActorId<PeerManager> peer_manager, td::actor::ActorId<StorageCli> storage_cli,
            ton::PeerId self_id, td::uint32 torrent_id, td::Promise<td::Unit> on_completed)
        : peer_manager_(peer_manager)
        , storage_cli_(std::move(storage_cli))
        , self_id_(self_id)
        , torrent_id_(std::move(torrent_id))
        , on_completed_(std::move(on_completed)) {
    }
    void get_peers(td::Promise<std::vector<ton::PeerId>> promise) override {
      send_closure(peer_manager_, &PeerManager::get_peers, std::move(promise));
    }
    void register_self(td::actor::ActorId<ton::NodeActor> self) override {
      CHECK(self_.empty());
      self_ = self;
      send_closure(peer_manager_, &PeerManager::register_node, self_id_, self_);
    }
    ~Context() {
      if (!self_.empty()) {
        send_closure(peer_manager_, &PeerManager::unregister_node, self_id_, self_);
      }
    }
    td::actor::ActorOwn<ton::PeerActor> create_peer(ton::PeerId self_id, ton::PeerId peer_id,
                                                    td::SharedState<ton::PeerState> state) override {
      CHECK(self_id == self_id_);
      // Routes PeerActor registration and queries through the PeerManager.
      class PeerCallback : public ton::PeerActor::Callback {
       public:
        PeerCallback(ton::PeerId self_id, ton::PeerId peer_id, td::actor::ActorId<PeerManager> peer_manager)
            : self_id_(self_id), peer_id_(peer_id), peer_manager_(std::move(peer_manager)) {
        }
        void register_self(td::actor::ActorId<ton::PeerActor> self) override {
          CHECK(self_.empty());
          self_ = std::move(self);
          send_closure(peer_manager_, &PeerManager::register_peer, self_id_, peer_id_, self_);
        }
        void send_query(td::uint64 query_id, td::BufferSlice query) override {
          send_closure(peer_manager_, &PeerManager::send_query, self_id_, peer_id_, std::move(query),
                       promise_send_closure(self_, &ton::PeerActor::on_query_result, query_id));
        }
        ~PeerCallback() {
          if (!self_.empty()) {
            send_closure(peer_manager_, &PeerManager::unregister_peer, self_id_, peer_id_, self_);
          }
        }

       private:
        td::actor::ActorId<ton::PeerActor> self_;
        ton::PeerId self_id_;
        ton::PeerId peer_id_;
        td::actor::ActorId<PeerManager> peer_manager_;
      };
      return td::actor::create_actor<ton::PeerActor>(PSLICE() << "ton::PeerActor " << self_id << "->" << peer_id,
                                                     td::make_unique<PeerCallback>(self_id, peer_id, peer_manager_),
                                                     std::move(state));
    }
    void on_completed() override {
      if (on_completed_) {
        on_completed_.set_value(td::Unit());
      }
      td::TerminalIO::out() << "Torrent #" << torrent_id_ << " completed\n";
    }
    void on_closed(ton::Torrent torrent) override {
      // return the Torrent object to StorageCli so it can be restarted later
      send_closure(storage_cli_, &StorageCli::got_torrent, torrent_id_, std::move(torrent));
    }

   private:
    td::actor::ActorId<PeerManager> peer_manager_;
    td::actor::ActorId<StorageCli> storage_cli_;
    ton::PeerId self_id_;
    td::uint32 torrent_id_;
    std::vector<ton::PeerId> peers_;
    td::Promise<td::Unit> on_completed_;
    td::actor::ActorId<ton::NodeActor> self_;
  };
  td::Promise<td::Unit> on_completed;
  if (wait_download) {
    // `download` command: defer the command promise until completion
    on_completed = std::move(promise);
  }
  auto context =
      td::make_unique<Context>(ptr->peer_manager.get(), actor_id(this), self_id, ptr->id, std::move(on_completed));
  ptr->node = td::actor::create_actor<ton::NodeActor>(PSLICE() << "Node#" << self_id, self_id, ptr->torrent.unwrap(),
                                                      std::move(context), should_download);
  td::TerminalIO::out() << "Torrent #" << ptr->id << " started\n";
  if (promise) {
    promise.set_value(td::Unit());
  }
}
// Log completion of a torrent download.
void on_torrent_completed(td::uint32 torrent_id) {
  td::TerminalIO::out() << "Torrent #" << torrent_id << " completed\n";
}
// Called by a NodeActor's Context on shutdown: take ownership of the Torrent
// object back so the torrent can be started again later.
void got_torrent(td::uint32 torrent_id, ton::Torrent &&torrent) {
  // NOTE(review): operator[] default-inserts a fresh Info if the id was
  // removed in the meantime — confirm this is intended.
  infos_[torrent_id].torrent = std::move(torrent);
  td::TerminalIO::out() << "Torrent #" << torrent_id << " ready to start again\n";
}
// `stop` command: tear down the NodeActor and PeerManager of a torrent.
void torrent_stop(td::Slice id_raw, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, info, to_info(id_raw));
  info->node.reset();
  info->peer_manager.reset();
  promise.set_value(td::Unit());
  td::TerminalIO::out() << "Torrent #" << info->id << " stopped\n";
}
// `pause`/`resume` commands: toggle whether the active NodeActor downloads.
void torrent_set_should_download(td::Slice id_raw, bool should_download, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, info, to_info(id_raw));
  if (info->node.empty()) {
    promise.set_error(td::Status::Error("Torrent is not active"));
    return;
  }
  send_closure(info->node, &ton::NodeActor::set_should_download, should_download);
  promise.set_value(td::Unit());
}
// `priority <id> <file_id|*> <priority>`: set the download priority (0..254)
// of one file, or of all files when the file token is "*". 255 is reserved.
void torrent_set_priority(td::ConstParser &parser, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, info, to_info(parser.read_word()));
  if (info->node.empty()) {
    promise.set_error(td::Status::Error("Torrent is not active"));
    return;
  }
  auto file_token = parser.read_word();
  // "*" selects every file; encoded for NodeActor as SIZE_MAX
  size_t target_file = std::numeric_limits<size_t>::max();
  if (file_token != "*") {
    TRY_RESULT_PROMISE_ASSIGN(promise, target_file, td::to_integer_safe<td::size_t>(file_token));
  }
  TRY_RESULT_PROMISE(promise, priority, td::to_integer_safe<td::uint8>(parser.read_word()));
  if (priority == 255) {
    promise.set_error(td::Status::Error("Priority = 255 is reserved"));
    return;
  }
  send_closure(info->node, &ton::NodeActor::set_file_priority, target_file, priority);
  promise.set_value(td::Unit());
}
// `save` command: serialize the torrent meta (proof depth limited to 10) and
// write it to `path`.
// Fix: to_torrent() may yield nullptr for a torrent currently owned by an
// active NodeActor; the old code dereferenced it unconditionally.
void torrent_save(td::Slice id_raw, td::Slice path, td::Promise<td::Unit> promise) {
  TRY_RESULT_PROMISE(promise, ptr, to_torrent(id_raw));
  if (ptr == nullptr) {
    promise.set_error(td::Status::Error("Torrent is active; stop it before saving"));
    return;
  }
  auto meta = ptr->get_meta(ton::Torrent::GetMetaOptions().with_proof_depth_limit(10));
  TRY_STATUS_PROMISE(promise, td::write_file(path.str(), meta.serialize()));
  promise.set_value(td::Unit());
  td::TerminalIO::out() << "Torrent #" << id_raw << " saved\n";
}
// `load` command: read a torrent meta file from disk, validate it, and
// register the torrent under a fresh id. Rejects duplicates by header hash.
td::Result<Info *> torrent_load(td::Slice path) {
  TRY_RESULT(data, td::read_file(PSLICE() << td::trim(path)));
  // deserialize validates the file up front; `meta` itself is otherwise
  // unused — NOTE(review): confirm Torrent::open cannot take it directly
  TRY_RESULT(meta, ton::TorrentMeta::deserialize(data));
  ton::Torrent::Options options;
  options.in_memory = false;
  options.root_dir = ".";
  options.validate = true;
  TRY_RESULT(torrent, ton::Torrent::open(options, data));
  auto hash = torrent.get_info().header_hash;
  // refuse to register the same torrent twice
  for (auto &it : infos_) {
    if (it.second.hash == hash) {
      return td::Status::Error(PSLICE() << "Torrent already loaded (#" << it.first << ")");
    }
  }
  td::TerminalIO::out() << "Torrent #" << torrent_id_ << " created\n";
  auto res =
      infos_.emplace(torrent_id_, Info{torrent_id_, hash, std::move(torrent), td::actor::ActorOwn<PeerManager>(),
                                       td::actor::ActorOwn<ton::NodeActor>()});
  torrent_id_++;
  return &res.first->second;
}
// Owner dropped this actor: initiate shutdown.
void hangup() override {
  quit();
}
// A shared reference (e.g. the TerminalIO callback) was released; decrement
// the ref count and stop once shutdown was requested and no refs remain.
void hangup_shared() override {
  CHECK(ref_cnt_ > 0);
  ref_cnt_--;
  //if (get_link_token() == 1) {
  //io_.reset();
  //}
  try_stop();
}
// Stop the actor only once shutdown was requested AND all shared refs are gone.
void try_stop() {
  if (!is_closing_ || ref_cnt_ != 0) {
    return;
  }
  stop();
}
// Request shutdown: close the terminal, drop our own ref, and stop as soon
// as the remaining shared refs are released (see hangup_shared/try_stop).
void quit() {
  is_closing_ = true;
  io_.reset();
  //client_.reset();
  ref_cnt_--;
  try_stop();
}
// Last actor going down: stop the scheduler so main() can return.
void tear_down() override {
  td::actor::SchedulerContext::get()->stop();
}
};
// Entry point: parse CLI options, then run a single-threaded actor scheduler
// hosting one StorageCli console actor until it stops the scheduler.
int main(int argc, char *argv[]) {
  SET_VERBOSITY_LEVEL(verbosity_INFO);
  td::set_default_failure_signal_handler();
  StorageCliOptions options;
  td::OptionsParser p;
  p.set_description("experimental cli for ton storage");
  // fix: option description previously read "prints_help"
  p.add_option('h', "help", "print help", [&]() {
    std::cout << (PSLICE() << p).c_str();
    std::exit(2);
    return td::Status::OK();
  });
  p.add_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) {
    auto verbosity = td::to_integer<int>(arg);
    SET_VERBOSITY_LEVEL(VERBOSITY_NAME(FATAL) + verbosity);
    return (verbosity >= 0 && verbosity <= 20) ? td::Status::OK() : td::Status::Error("verbosity must be 0..20");
  });
  p.add_option('C', "config", "set ton config", [&](td::Slice arg) {
    options.config = arg.str();
    return td::Status::OK();
  });
  p.add_option('D', "db", "root for dbs", [&](td::Slice fname) {
    options.db_root = fname.str();
    return td::Status::OK();
  });
  p.add_option('I', "ip", "set ip:port", [&](td::Slice arg) {
    td::IPAddress addr;
    TRY_STATUS(addr.init_host_port(arg.str()));
    options.addr = addr;
    return td::Status::OK();
  });
  p.add_option('E', "execute", "execute one command", [&](td::Slice arg) {
    options.cmd = arg.str();
    return td::Status::OK();
  });
  p.add_option('d', "dir", "working directory", [&](td::Slice arg) { return td::chdir(arg.str()); });
  auto S = p.run(argc, argv);
  if (S.is_error()) {
    std::cerr << S.move_as_error().message().str() << std::endl;
    std::_Exit(2);
  }
  // single scheduler thread; StorageCli::tear_down stops it
  td::actor::Scheduler scheduler({0});
  scheduler.run_in_context([&] { td::actor::create_actor<StorageCli>("console", options).release(); });
  scheduler.run();
  return 0;
}

1341
storage/test/storage.cpp Normal file

File diff suppressed because it is too large Load diff