More verbose state serializer logs (#976)
Co-authored-by: SpyCheese <mikle98@yandex.ru>
Parent: 9a543c6b28
Commit: 037053fffe
2 changed files with 63 additions and 27 deletions
@@ -14,6 +14,9 @@
     You should have received a copy of the GNU Lesser General Public License
     along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
 */
+#include "td/utils/Time.h"
+#include "td/utils/Timer.h"
+
 #include <map>
 #include "vm/boc.h"
 #include "vm/boc-writers.h"
@@ -30,7 +33,8 @@ class LargeBocSerializer {
  public:
   using Hash = Cell::Hash;
 
-  explicit LargeBocSerializer(std::shared_ptr<CellDbReader> reader) : reader(std::move(reader)) {}
+  explicit LargeBocSerializer(std::shared_ptr<CellDbReader> reader) : reader(std::move(reader)) {
+  }
 
   void add_root(Hash root);
   td::Status import_cells();
@@ -65,7 +69,8 @@ class LargeBocSerializer {
   std::map<Hash, CellInfo> cells;
   std::vector<std::pair<const Hash, CellInfo>*> cell_list;
   struct RootInfo {
-    RootInfo(Hash hash, int idx) : hash(hash), idx(idx) {}
+    RootInfo(Hash hash, int idx) : hash(hash), idx(idx) {
+    }
     Hash hash;
     int idx;
   };
@@ -78,6 +83,10 @@ class LargeBocSerializer {
   void reorder_cells();
   int revisit(int cell_idx, int force = 0);
   td::uint64 compute_sizes(int mode, int& r_size, int& o_size);
+
+  td::Timestamp log_speed_at_;
+  size_t processed_cells_ = 0;
+  static constexpr double LOG_SPEED_PERIOD = 120.0;
 };
 
 void LargeBocSerializer::add_root(Hash root) {
@@ -85,12 +94,16 @@ void LargeBocSerializer::add_root(Hash root) {
 }
 
 td::Status LargeBocSerializer::import_cells() {
+  td::Timer timer;
+  log_speed_at_ = td::Timestamp::in(LOG_SPEED_PERIOD);
+  processed_cells_ = 0;
   for (auto& root : roots) {
     TRY_RESULT(idx, import_cell(root.hash));
     root.idx = idx;
   }
   reorder_cells();
   CHECK(!cell_list.empty());
+  LOG(ERROR) << "serializer: import_cells took " << timer.elapsed() << "s, " << cell_count << " cells";
   return td::Status::OK();
 }
 
@@ -98,6 +111,12 @@ td::Result<int> LargeBocSerializer::import_cell(Hash hash, int depth) {
   if (depth > Cell::max_depth) {
     return td::Status::Error("error while importing a cell into a bag of cells: cell depth too large");
   }
+  ++processed_cells_;
+  if (log_speed_at_.is_in_past()) {
+    log_speed_at_ += LOG_SPEED_PERIOD;
+    LOG(WARNING) << "serializer: import_cells " << (double)processed_cells_ / LOG_SPEED_PERIOD << " cells/s";
+    processed_cells_ = 0;
+  }
   auto it = cells.find(hash);
   if (it != cells.end()) {
     it->second.should_cache = true;
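The members added above (processed_cells_, log_speed_at_, LOG_SPEED_PERIOD) implement a simple periodic throughput log: every imported cell bumps a counter, and once the 120-second deadline passes, the count is divided by the period, logged as cells/s, and reset. Below is a minimal standalone sketch of the same pattern using std::chrono instead of td::Timestamp; the class and function names are illustrative only, not part of the TON codebase.

#include <chrono>
#include <cstddef>
#include <cstdio>

// Standalone analogue of the patch's processed_cells_ / log_speed_at_ logic:
// count items, and once per period log the average rate and reset the counter.
class ThroughputLogger {
 public:
  using Clock = std::chrono::steady_clock;

  explicit ThroughputLogger(double period_seconds)
      : period_seconds_(period_seconds),
        period_(std::chrono::duration_cast<Clock::duration>(std::chrono::duration<double>(period_seconds))),
        deadline_(Clock::now() + period_) {
  }

  // Call once per processed item (the patch calls the equivalent per imported or serialized cell).
  void tick(const char* stage) {
    ++processed_;
    if (Clock::now() >= deadline_) {
      deadline_ += period_;
      std::printf("%s: %.1f items/s\n", stage, static_cast<double>(processed_) / period_seconds_);
      processed_ = 0;
    }
  }

 private:
  double period_seconds_;
  Clock::duration period_;
  Clock::time_point deadline_;
  std::size_t processed_ = 0;
};

int main() {
  ThroughputLogger logger(0.5);  // short period so the demo prints something quickly
  for (long i = 0; i < 50'000'000; ++i) {
    logger.tick("import_cells");
  }
  return 0;
}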
@@ -282,6 +301,7 @@ td::uint64 LargeBocSerializer::compute_sizes(int mode, int& r_size, int& o_size)
 }
 
 td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
+  td::Timer timer;
   using Mode = BagOfCells::Mode;
   BagOfCells::Info info;
   if ((mode & Mode::WithCacheBits) && !(mode & Mode::WithIndex)) {
@@ -313,13 +333,9 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
     return td::Status::Error("bag of cells is too large");
   }
 
-  boc_writers::FileWriter writer{fd, (size_t) info.total_size};
-  auto store_ref = [&](unsigned long long value) {
-    writer.store_uint(value, info.ref_byte_size);
-  };
-  auto store_offset = [&](unsigned long long value) {
-    writer.store_uint(value, info.offset_byte_size);
-  };
+  boc_writers::FileWriter writer{fd, (size_t)info.total_size};
+  auto store_ref = [&](unsigned long long value) { writer.store_uint(value, info.ref_byte_size); };
+  auto store_offset = [&](unsigned long long value) { writer.store_uint(value, info.offset_byte_size); };
 
   writer.store_uint(info.magic, 4);
@@ -371,6 +387,8 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
   }
   DCHECK(writer.position() == info.data_offset);
   size_t keep_position = writer.position();
+  log_speed_at_ = td::Timestamp::in(LOG_SPEED_PERIOD);
+  processed_cells_ = 0;
   for (int i = 0; i < cell_count; ++i) {
     auto hash = cell_list[cell_count - 1 - i]->first;
     const auto& dc_info = cell_list[cell_count - 1 - i]->second;
@@ -389,6 +407,12 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
       DCHECK(k > i && k < cell_count);
       store_ref(k);
     }
+    ++processed_cells_;
+    if (log_speed_at_.is_in_past()) {
+      log_speed_at_ += LOG_SPEED_PERIOD;
+      LOG(WARNING) << "serializer: serialize " << (double)processed_cells_ / LOG_SPEED_PERIOD << " cells/s";
+      processed_cells_ = 0;
+    }
   }
   DCHECK(writer.position() - keep_position == info.data_size);
   if (info.has_crc32c) {
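The store_ref and store_offset helpers above wrap writer.store_uint(value, byte_size): the bag-of-cells format writes cell references and offsets using a per-bag byte width (info.ref_byte_size and info.offset_byte_size) rather than a fixed 4 or 8 bytes. A minimal standalone sketch of writing an unsigned value as exactly byte_size big-endian bytes follows; it is illustrative only and not the boc_writers implementation.

#include <cstdint>
#include <cstdio>
#include <vector>

// Append `value` to `out` as exactly `byte_size` big-endian bytes (most significant byte first).
// Values that do not fit into byte_size bytes are silently truncated; this is only a sketch.
void store_uint_be(std::vector<unsigned char>& out, unsigned long long value, int byte_size) {
  for (int i = byte_size - 1; i >= 0; --i) {
    out.push_back(static_cast<unsigned char>((value >> (8 * i)) & 0xff));
  }
}

int main() {
  std::vector<unsigned char> buf;
  int ref_byte_size = 3;  // e.g. wide enough to index up to 2^24 - 1 cells
  store_uint_be(buf, 0x010203ULL, ref_byte_size);
  for (unsigned char b : buf) {
    std::printf("%02x ", b);  // prints: 01 02 03
  }
  std::printf("\n");
  return 0;
}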
@@ -396,17 +420,23 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
     writer.store_uint(td::bswap32(crc), 4);
   }
   DCHECK(writer.empty());
-  return writer.finalize();
-}
+  TRY_STATUS(writer.finalize());
+  LOG(ERROR) << "serializer: serialize took " << timer.elapsed() << "s, " << cell_count << " cells, "
+             << writer.position() << " bytes";
+  return td::Status::OK();
+}
 }  // namespace
 
-td::Status std_boc_serialize_to_file_large(std::shared_ptr<CellDbReader> reader, Cell::Hash root_hash,
-                                           td::FileFd& fd, int mode) {
+td::Status std_boc_serialize_to_file_large(std::shared_ptr<CellDbReader> reader, Cell::Hash root_hash, td::FileFd& fd,
+                                           int mode) {
+  td::Timer timer;
   CHECK(reader != nullptr)
   LargeBocSerializer serializer(reader);
   serializer.add_root(root_hash);
   TRY_STATUS(serializer.import_cells());
-  return serializer.serialize(fd, mode);
+  TRY_STATUS(serializer.serialize(fd, mode));
+  LOG(ERROR) << "serialization took " << timer.elapsed() << "s";
+  return td::Status::OK();
 }
 
 }  // namespace vm
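Both serialize() and the std_boc_serialize_to_file_large wrapper now follow the same shape: construct a td::Timer at the start of the step and log timer.elapsed() together with the amount of work done (cells, bytes) once it completes. A standalone sketch of that pattern with std::chrono is below; the Timer class here is an illustrative stand-in, not td::Timer itself.

#include <chrono>
#include <cstdio>

// Minimal analogue of td::Timer as used in the patch: remember a start point,
// report elapsed seconds on demand.
class Timer {
 public:
  Timer() : start_(std::chrono::steady_clock::now()) {
  }
  double elapsed() const {
    return std::chrono::duration<double>(std::chrono::steady_clock::now() - start_).count();
  }

 private:
  std::chrono::steady_clock::time_point start_;
};

int main() {
  Timer timer;
  // ... the actual work would go here (import_cells / serialize in the patch) ...
  long long cells = 0;
  for (long long i = 0; i < 100'000'000; ++i) {
    cells += 1;
  }
  std::printf("serializer: serialize took %.3fs, %lld cells\n", timer.elapsed(), cells);
  return 0;
}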
@@ -132,27 +132,33 @@ void AsyncStateSerializer::next_iteration() {
   if (attempt_ < max_attempt() && last_key_block_id_.id.seqno < last_block_id_.id.seqno &&
       need_serialize(masterchain_handle_)) {
     if (!have_masterchain_state_) {
-      LOG(INFO) << "started serializing persistent state for " << masterchain_handle_->id().id;
+      LOG(ERROR) << "started serializing persistent state for " << masterchain_handle_->id().id.to_str();
       // block next attempts immediately, but send actual request later
       running_ = true;
+      double delay = td::Random::fast(0, 3600);
+      LOG(WARNING) << "serializer delay = " << delay << "s";
       delay_action([SelfId = actor_id(
                         this)]() { td::actor::send_closure(SelfId, &AsyncStateSerializer::request_masterchain_state); },
-                   td::Timestamp::in(td::Random::fast(0, 3600)));
+                   td::Timestamp::in(delay));
       return;
     }
     while (next_idx_ < shards_.size()) {
       if (!need_monitor(shards_[next_idx_].shard_full())) {
         next_idx_++;
       } else {
-        // block next attempts immediately, but send actual request later
-        running_ = true;
-        delay_action(
-            [SelfId = actor_id(this), shard = shards_[next_idx_]]() { td::actor::send_closure(SelfId, &AsyncStateSerializer::request_shard_state, shard); },
-            td::Timestamp::in(td::Random::fast(0, 1800)));
+        // block next attempts immediately, but send actual request later
+        running_ = true;
+        double delay = td::Random::fast(0, 1800);
+        LOG(WARNING) << "serializer delay = " << delay << "s";
+        delay_action(
+            [SelfId = actor_id(this), shard = shards_[next_idx_]]() {
+              td::actor::send_closure(SelfId, &AsyncStateSerializer::request_shard_state, shard);
+            },
+            td::Timestamp::in(delay));
         return;
       }
     }
-    LOG(INFO) << "finished serializing persistent state for " << masterchain_handle_->id().id;
+    LOG(ERROR) << "finished serializing persistent state for " << masterchain_handle_->id().id.to_str();
     last_key_block_ts_ = masterchain_handle_->unix_time();
     last_key_block_id_ = masterchain_handle_->id();
   }
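The change above pulls the random delay into a local variable so the same value can be logged before it is used: each serialization request is deferred by a uniformly random number of seconds (up to 3600 for the masterchain state, up to 1800 for shard states) before the actor sends itself the actual request. A standalone sketch of that compute-log-schedule pattern follows; delay_action and td::Random::fast belong to the TON actor/utility layer, and the thread-based scheduler below is only a simplified stand-in.

#include <chrono>
#include <cstdio>
#include <functional>
#include <random>
#include <thread>

// Simplified stand-in for delay_action: run `action` after `delay_seconds` on a detached thread.
void delay_action(std::function<void()> action, double delay_seconds) {
  std::thread([action = std::move(action), delay_seconds]() {
    std::this_thread::sleep_for(std::chrono::duration<double>(delay_seconds));
    action();
  }).detach();
}

int main() {
  std::mt19937 rng{std::random_device{}()};
  // Pick the delay once so the same value is both logged and used for scheduling,
  // mirroring the `double delay = td::Random::fast(0, 3600);` change in the patch.
  double delay = std::uniform_real_distribution<double>(0.0, 3.0)(rng);  // range shortened for the demo
  std::printf("serializer delay = %.2fs\n", delay);
  delay_action([] { std::printf("request_masterchain_state\n"); }, delay);

  // Keep the process alive long enough for the delayed action to fire.
  std::this_thread::sleep_for(std::chrono::seconds(4));
  return 0;
}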
@@ -194,7 +200,7 @@ void AsyncStateSerializer::got_masterchain_handle(BlockHandle handle) {
 
 void AsyncStateSerializer::got_masterchain_state(td::Ref<MasterchainState> state,
                                                  std::shared_ptr<vm::CellDbReader> cell_db_reader) {
-  LOG(INFO) << "serializing masterchain state " << masterchain_handle_->id().id;
+  LOG(ERROR) << "serializing masterchain state " << masterchain_handle_->id().id.to_str();
   have_masterchain_state_ = true;
   CHECK(next_idx_ == 0);
   CHECK(shards_.size() == 0);
@@ -217,7 +223,7 @@ void AsyncStateSerializer::got_masterchain_state(td::Ref<MasterchainState> state
 }
 
 void AsyncStateSerializer::stored_masterchain_state() {
-  LOG(INFO) << "finished serializing masterchain state " << masterchain_handle_->id().id;
+  LOG(ERROR) << "finished serializing masterchain state " << masterchain_handle_->id().id.to_str();
   running_ = false;
   next_iteration();
 }
@@ -247,13 +253,13 @@ void AsyncStateSerializer::got_shard_handle(BlockHandle handle) {
 
 void AsyncStateSerializer::got_shard_state(BlockHandle handle, td::Ref<ShardState> state,
                                            std::shared_ptr<vm::CellDbReader> cell_db_reader) {
-  LOG(INFO) << "serializing shard state " << handle->id().id;
+  LOG(ERROR) << "serializing shard state " << handle->id().id.to_str();
   auto write_data = [hash = state->root_cell()->get_hash(), cell_db_reader](td::FileFd& fd) {
     return vm::std_boc_serialize_to_file_large(cell_db_reader, hash, fd, 31);
   };
   auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result<td::Unit> R) {
     R.ensure();
-    LOG(INFO) << "finished serializing shard state " << handle->id().id;
+    LOG(ERROR) << "finished serializing shard state " << handle->id().id.to_str();
     td::actor::send_closure(SelfId, &AsyncStateSerializer::success_handler);
   });
   td::actor::send_closure(manager_, &ValidatorManager::store_persistent_state_file_gen, handle->id(),
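got_shard_state hands the manager two callables: write_data, which serializes the shard state's root cell into an already-open file via std_boc_serialize_to_file_large (mode 31), and a completion promise that logs and then notifies the actor through success_handler. A standalone sketch of that shape follows, with plain std::function in place of td::Promise and a fake file type; all names below are illustrative, not the validator's actual API.

#include <cstdio>
#include <functional>
#include <string>

// Simplified stand-ins for the real types involved.
struct File {
  std::string path;
};
using Status = int;  // 0 = ok in this sketch; the real code uses td::Status

// Stand-in for the store routine: opens a file, lets `write_data` fill it,
// then reports the outcome to `on_done`.
void store_persistent_state_file(const std::string& path, std::function<Status(File&)> write_data,
                                 std::function<void(Status)> on_done) {
  File fd{path};
  Status status = write_data(fd);
  on_done(status);
}

int main() {
  std::string root_hash = "<root cell hash>";
  // write_data captures what it needs (the root hash and a cell DB reader in the real code)
  // and does the heavy serialization work.
  auto write_data = [root_hash](File& fd) -> Status {
    std::printf("serializing BoC with root %s into %s\n", root_hash.c_str(), fd.path.c_str());
    return 0;  // the real code returns vm::std_boc_serialize_to_file_large(...)
  };
  // Completion callback: log and continue, as the promise calling success_handler does.
  auto on_done = [](Status status) {
    std::printf("finished serializing shard state (status=%d)\n", status);
  };
  store_persistent_state_file("state.boc", write_data, on_done);
  return 0;
}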