Add "--copy" to storage-daemon-cli create command (#576)
* Add "--copy" flag to "create", improve console output * Hide excessive logs
parent eff610f807
commit ad736c6bc3
9 changed files with 77 additions and 18 deletions
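With this change, the interactive `create` command accepts a `--copy` flag that first copies the source files into storage-daemon's internal directory before the bag is registered. For example, inside storage-daemon-cli (the path and description below are illustrative, not from the commit):

    create -d backup --copy /var/ton-data/my-bag
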
@@ -40,7 +40,7 @@ void BdwStats::on_packet_ack(const PacketInfo &info, td::Timestamp sent_at, td::
   auto ack_passed = now.at() - info.delivered_now.at();
   auto passed = td::max(sent_passed, ack_passed);
   if (passed < 0.01) {
-    VLOG(RLDP_WARNING) << "Invalid passed " << passed;
+    VLOG(RLDP_INFO) << "Invalid passed " << passed;
   }
   auto delivered = delivered_count - info.delivered_count;
   on_rate_sample((double)delivered / passed, now, info.is_paused);

@@ -25,11 +25,11 @@ namespace ton {
 namespace rldp2 {
 void RttStats::on_rtt_sample(double rtt_sample, double ack_delay, td::Timestamp now) {
   if (rtt_sample < 0.001 || rtt_sample > 10) {
-    VLOG(RLDP_WARNING) << "Suspicious rtt sample " << rtt_sample;
+    VLOG(RLDP_INFO) << "Suspicious rtt sample " << rtt_sample;
     return;
   }
   if (ack_delay < -1e-9 || ack_delay > 10) {
-    VLOG(RLDP_WARNING) << "Suspicious ack_delay " << ack_delay;
+    VLOG(RLDP_INFO) << "Suspicious ack_delay " << ack_delay;
     return;
   }
   rtt_sample = td::max(0.01, rtt_sample);

@@ -698,4 +698,37 @@ void Torrent::load_from_files(std::string files_path) {
   }
 }
 
+td::Status Torrent::copy_to(const std::string &new_root_dir) {
+  if (!is_completed() || included_size_ != info_.file_size) {
+    return td::Status::Error("Torrent::copy_to is allowed only for fully completed torrents");
+  }
+  auto get_new_chunk_path = [&](td::Slice name) -> std::string {
+    return PSTRING() << new_root_dir << TD_DIR_SLASH << header_.value().dir_name << TD_DIR_SLASH << name;
+  };
+  std::vector<td::BlobView> new_blobs;
+  for (size_t i = 1; i < chunks_.size(); ++i) {
+    auto &chunk = chunks_[i];
+    std::string new_path = get_new_chunk_path(chunk.name);
+    TRY_STATUS(td::mkpath(new_path));
+    TRY_RESULT(new_blob, td::FileNoCacheBlobView::create(new_path, chunk.size, true));
+    static const td::uint64 BUF_SIZE = 1 << 17;
+    td::BufferSlice buf(BUF_SIZE);
+    for (td::uint64 l = 0; l < chunk.size; l += BUF_SIZE) {
+      td::uint64 r = std::min(chunk.size, l + BUF_SIZE);
+      TRY_RESULT_PREFIX(s, chunk.data.view(buf.as_slice().substr(0, r - l), l),
+                        PSTRING() << "Failed to read " << chunk.name << ": ");
+      if (s.size() != r - l) {
+        return td::Status::Error(PSTRING() << "Failed to read " << chunk.name);
+      }
+      TRY_STATUS_PREFIX(new_blob.write(s, l), PSTRING() << "Failed to write " << chunk.name << ": ");
+    }
+    new_blobs.push_back(std::move(new_blob));
+  }
+  root_dir_ = new_root_dir;
+  for (size_t i = 1; i < chunks_.size(); ++i) {
+    chunks_[i].data = std::move(new_blobs[i - 1]);
+  }
+  return td::Status::OK();
+}
+
 }  // namespace ton

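The new Torrent::copy_to duplicates every chunk of a fully downloaded bag into a new root directory and then repoints the in-memory chunk blobs at the copied files. A minimal sketch of how a caller might use it, assuming the relevant headers are included (the helper function and its name are hypothetical; only is_completed() and copy_to() come from the commit):

// Hypothetical helper, not part of the commit: relocate a completed bag's
// files under a new root directory and keep serving them from there.
td::Status relocate_bag(ton::Torrent &torrent, const std::string &new_root_dir) {
  if (!torrent.is_completed()) {
    // copy_to() itself also rejects incomplete bags with an error status.
    return td::Status::Error("bag is not fully downloaded yet");
  }
  // On success the torrent's root_dir_ points at new_root_dir and all chunk
  // data is read from the copied files.
  return torrent.copy_to(new_root_dir);
}
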
@@ -159,6 +159,8 @@ class Torrent {
 
   void load_from_files(std::string files_path);
 
+  td::Status copy_to(const std::string& new_root_dir);
+
  private:
   td::Bits256 hash_;
   bool inited_info_ = false;

@@ -136,7 +136,7 @@ void StorageManager::add_torrent(Torrent torrent, bool start_download, bool allo
 td::Status StorageManager::add_torrent_impl(Torrent torrent, bool start_download, bool allow_upload) {
   td::Bits256 hash = torrent.get_hash();
   if (torrents_.count(hash)) {
-    return td::Status::Error("Cannot add torrent: duplicate hash");
+    return td::Status::Error(PSTRING() << "Cannot add torrent: duplicate hash " << hash.to_hex());
   }
   TorrentEntry& entry = torrents_[hash];
   entry.hash = hash;

@@ -299,6 +299,7 @@ class StorageDaemonCli : public td::actor::Actor {
       std::string path;
       bool found_path = false;
       bool upload = true;
+      bool copy = false;
       std::string description;
       bool json = false;
       for (size_t i = 1; i < tokens.size(); ++i) {

@@ -315,6 +316,10 @@ class StorageDaemonCli : public td::actor::Actor {
           upload = false;
           continue;
         }
+        if (tokens[i] == "--copy") {
+          copy = true;
+          continue;
+        }
         if (tokens[i] == "--json") {
           json = true;
           continue;

@@ -330,7 +335,7 @@ class StorageDaemonCli : public td::actor::Actor {
       if (!found_path) {
         return td::Status::Error("Unexpected EOLN");
       }
-      return execute_create(std::move(path), std::move(description), upload, json);
+      return execute_create(std::move(path), std::move(description), upload, copy, json);
     } else if (tokens[0] == "add-by-hash" || tokens[0] == "add-by-meta") {
       td::optional<std::string> param;
       std::string root_dir;

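Putting the three parsing hunks together, the "create" branch of the command dispatcher now has roughly the following shape. This is a condensed reconstruction for readability, not the literal file contents; the "-d" and positional <file/dir> handling is abbreviated to a comment:

// Condensed reconstruction of the "create" branch after this commit
// (flag names and the execute_create signature come from the hunks above).
std::string path, description;
bool found_path = false, upload = true, copy = false, json = false;
for (size_t i = 1; i < tokens.size(); ++i) {
  if (tokens[i] == "--no-upload") { upload = false; continue; }
  if (tokens[i] == "--copy")      { copy = true;    continue; }
  if (tokens[i] == "--json")      { json = true;    continue; }
  // "-d <description>" and the positional <file/dir> (sets path/found_path) are handled here.
}
if (!found_path) {
  return td::Status::Error("Unexpected EOLN");
}
return execute_create(std::move(path), std::move(description), upload, copy, json);
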
@@ -748,9 +753,10 @@ class StorageDaemonCli : public td::actor::Actor {
   td::Status execute_help() {
     td::TerminalIO::out() << "help\tPrint this help\n";
     td::TerminalIO::out()
-        << "create [-d description] [--no-upload] [--json] <file/dir>\tCreate bag of files from <file/dir>\n";
+        << "create [-d description] [--no-upload] [--copy] [--json] <file/dir>\tCreate bag of files from <file/dir>\n";
     td::TerminalIO::out() << "\t-d\tDescription will be stored in torrent info\n";
     td::TerminalIO::out() << "\t--no-upload\tDon't share bag with peers\n";
+    td::TerminalIO::out() << "\t--copy\tFiles will be copied to an internal directory of storage-daemon\n";
     td::TerminalIO::out() << "\t--json\tOutput in json\n";
     td::TerminalIO::out() << "add-by-hash <bag-id> [-d root_dir] [--paused] [--no-upload] [--json] [--partial file1 "
                              "file2 ...]\tAdd bag with given BagID (in hex)\n";

@@ -848,9 +854,9 @@ class StorageDaemonCli : public td::actor::Actor {
     return td::Status::OK();
   }
 
-  td::Status execute_create(std::string path, std::string description, bool upload, bool json) {
+  td::Status execute_create(std::string path, std::string description, bool upload, bool copy, bool json) {
     TRY_RESULT_PREFIX_ASSIGN(path, td::realpath(path), "Invalid path: ");
-    auto query = create_tl_object<ton_api::storage_daemon_createTorrent>(path, description, upload);
+    auto query = create_tl_object<ton_api::storage_daemon_createTorrent>(path, description, upload, copy);
     send_query(std::move(query),
                [=, SelfId = actor_id(this)](td::Result<tl_object_ptr<ton_api::storage_daemon_torrentFull>> R) {
                  if (R.is_error()) {

@@ -1662,7 +1668,7 @@ class StorageDaemonCli : public td::actor::Actor {
       }
       snprintf(str, sizeof(str), "%6u: (%s) %7s/%-7s %s ", i, priority,
                f->priority_ == 0 ? "---" : size_to_str(f->downloaded_size_).c_str(), size_to_str(f->size_).c_str(),
-               (f->downloaded_size_ == f->size_ ? "+" : " "));
+               ((f->downloaded_size_ == f->size_ && f->priority_ > 0) ? "+" : " "));
       td::TerminalIO::out() << str << f->name_ << "\n";
       ++i;
     }

@@ -69,6 +69,7 @@ class StorageDaemon : public td::actor::Actor {
   void start_up() override {
     CHECK(db_root_ != "");
     td::mkdir(db_root_).ensure();
     db_root_ = td::realpath(db_root_).move_as_ok();
     keyring_ = keyring::Keyring::create(db_root_ + "/keyring");
     {
       auto S = load_global_config();

@@ -285,20 +286,36 @@ class StorageDaemon : public td::actor::Actor {
   void run_control_query(ton_api::storage_daemon_createTorrent &query, td::Promise<td::BufferSlice> promise) {
     // Run in a separate thread
     delay_action(
-        [promise = std::move(promise), manager = manager_.get(), query = std::move(query)]() mutable {
+        [promise = std::move(promise), manager = manager_.get(), db_root = db_root_,
+         query = std::move(query)]() mutable {
           Torrent::Creator::Options options;
           options.piece_size = 128 * 1024;
           options.description = std::move(query.description_);
           TRY_RESULT_PROMISE(promise, torrent, Torrent::Creator::create_from_path(std::move(options), query.path_));
           td::Bits256 hash = torrent.get_hash();
+          td::Promise<td::Unit> P = [manager, hash, promise = std::move(promise)](td::Result<td::Unit> R) mutable {
+            TRY_RESULT_PROMISE(promise, unit, std::move(R));
+            get_torrent_info_full_serialized(manager, hash, std::move(promise));
+          };
+          if (query.copy_inside_) {
+            P = [P = std::move(P), manager, hash, db_root](td::Result<td::Unit> R) mutable {
+              TRY_RESULT_PROMISE(P, unit, std::move(R));
+              td::actor::send_closure(manager, &StorageManager::with_torrent, hash,
+                                      P.wrap([=](NodeActor::NodeState state) -> td::Status {
+                                        std::string dir = db_root + "/torrent/torrent-files/" + hash.to_hex();
+                                        LOG(INFO) << "Copying torrent to " << dir;
+                                        auto S = state.torrent.copy_to(dir);
+                                        if (S.is_error()) {
+                                          LOG(WARNING) << "Copying torrent to " << dir << ": " << S;
+                                          td::actor::send_closure(manager, &StorageManager::remove_torrent, hash, false,
+                                                                  [](td::Result<td::Unit>) {});
+                                        }
+                                        return S;
+                                      }));
+            };
+          }
           td::actor::send_closure(manager, &StorageManager::add_torrent, std::move(torrent), false, query.allow_upload_,
-                                  [manager, hash, promise = std::move(promise)](td::Result<td::Unit> R) mutable {
-                                    if (R.is_error()) {
-                                      promise.set_error(R.move_as_error());
-                                    } else {
-                                      get_torrent_info_full_serialized(manager, hash, std::move(promise));
-                                    }
-                                  });
+                                  std::move(P));
         },
         td::Timestamp::now());
   }

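The copy_inside_ handling above chains an extra step into an existing td::Promise: the original continuation P is replaced by a new promise that first copies the bag's files into the daemon's directory and only then calls the old P. A minimal sketch of that pattern under simplified signatures (the helper below is illustrative, not code from the commit):

// Illustrative sketch of the promise-wrapping pattern (simplified signatures):
// run copy_step after the wrapped operation succeeds, then forward to the
// original continuation; on failure, propagate the error instead.
td::Promise<td::Unit> wrap_with_copy_step(td::Promise<td::Unit> P, std::function<td::Status()> copy_step) {
  return [P = std::move(P), copy_step = std::move(copy_step)](td::Result<td::Unit> R) mutable {
    if (R.is_error()) {
      P.set_error(R.move_as_error());  // adding the torrent itself failed
      return;
    }
    auto S = copy_step();              // e.g. Torrent::copy_to into <db_root>/torrent/torrent-files/<hash>
    if (S.is_error()) {
      P.set_error(std::move(S));       // the real code also removes the just-added torrent here
      return;
    }
    P.set_value(td::Unit());
  };
}
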
@@ -810,6 +827,7 @@ class StorageDaemon : public td::actor::Actor {
         auto obj = create_tl_object<ton_api::storage_daemon_torrentFull>();
         fill_torrent_info_full(state.torrent, *obj);
         obj->torrent_->active_download_ = state.active_download;
         obj->torrent_->active_upload_ = state.active_upload;
         obj->torrent_->download_speed_ = state.download_speed;
         obj->torrent_->upload_speed_ = state.upload_speed;
         for (size_t i = 0; i < obj->files_.size(); ++i) {

@@ -829,7 +829,7 @@ storage.daemon.providerAddress address:string = storage.daemon.ProviderAddress;
 
 ---functions---
 storage.daemon.setVerbosity verbosity:int = storage.daemon.Success;
-storage.daemon.createTorrent path:string description:string allow_upload:Bool = storage.daemon.TorrentFull;
+storage.daemon.createTorrent path:string description:string allow_upload:Bool copy_inside:Bool = storage.daemon.TorrentFull;
 storage.daemon.addByHash hash:int256 root_dir:string start_download:Bool allow_upload:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull;
 storage.daemon.addByMeta meta:bytes root_dir:string start_download:Bool allow_upload:Bool priorities:(vector storage.PriorityAction) = storage.daemon.TorrentFull;
 storage.daemon.setActiveDownload hash:int256 active:Bool = storage.daemon.Success;

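After the TL schema gains the copy_inside:Bool field, the regenerated C++ stubs expose it as an extra constructor argument and as a plain member, which matches the calls already visible in the hunks above (the local variable names here mirror execute_create and run_control_query):

// Client side (storage-daemon-cli): the new field is the fourth constructor argument.
auto query = create_tl_object<ton_api::storage_daemon_createTorrent>(
    path, description, /*allow_upload=*/upload, /*copy_inside=*/copy);
// Daemon side: the generated object exposes it as a bool member.
if (query->copy_inside_) {
  // copy the files into <db_root>/torrent/torrent-files/<hash> before answering the query
}
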