Mirror of https://github.com/ton-blockchain/ton (synced 2025-03-09 15:40:10 +00:00)

Merge pull request #1288 from ton-blockchain/testnet: "Merge developer branch"
Commit d4de286f1b
234 changed files with 8388 additions and 1702 deletions
@@ -28,5 +28,5 @@ jobs:
      - name: Upload artifacts
        uses: actions/upload-artifact@master
        with:
          name: tonlib-android
          name: ton-android-tonlib
          path: artifacts
@@ -21,6 +21,12 @@ jobs:
          sudo apt-get update
          sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev liblz4-dev libjemalloc-dev

      - if: matrix.os == 'ubuntu-20.04'
        run: |
          sudo apt install -y manpages-dev software-properties-common
          sudo add-apt-repository ppa:ubuntu-toolchain-r/test
          sudo apt update && sudo apt install gcc-11 g++-11

      - if: matrix.os != 'ubuntu-24.04'
        run: |
          wget https://apt.llvm.org/llvm.sh
25  .github/workflows/build-ton-macos-15-arm64-shared.yml  (vendored, normal file)
@@ -0,0 +1,25 @@
name: MacOS-15 TON build (shared, arm64)

on: [push,workflow_dispatch,workflow_call]

jobs:
  build:
    runs-on: macos-15

    steps:
      - name: Check out repository
        uses: actions/checkout@v3
        with:
          submodules: 'recursive'

      - name: Build TON
        run: |
          cp assembly/native/build-macos-shared.sh .
          chmod +x build-macos-shared.sh
          ./build-macos-shared.sh -t -a

      - name: Upload artifacts
        uses: actions/upload-artifact@master
        with:
          name: ton-binaries-macos-15
          path: artifacts
@@ -1,4 +1,4 @@
name: MacOS TON build (shared, arm64)
name: MacOS-14 TON build (shared, arm64)

on: [push,workflow_dispatch,workflow_call]
16  .github/workflows/create-release.yml  (vendored)
@@ -83,6 +83,14 @@ jobs:
          workflow_conclusion: success
          skip_unpack: true

      - name: Download Android Tonlib artifacts
        uses: dawidd6/action-download-artifact@v2
        with:
          workflow: build-ton-linux-android-tonlib.yml
          path: artifacts
          workflow_conclusion: success
          skip_unpack: true

      - name: Show all artifacts
        run: |
          tree artifacts
@@ -501,3 +509,11 @@ jobs:
          file: artifacts/ton-wasm-binaries.zip
          asset_name: ton-wasm-binaries.zip
          tag: ${{ steps.tag.outputs.TAG }}

      - name: Upload Android Tonlib artifacts
        uses: svenstaro/upload-release-action@v2
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: artifacts/ton-android-tonlib.zip
          asset_name: ton-android-tonlib.zip
          tag: ${{ steps.tag.outputs.TAG }}
2  .github/workflows/ton-ccpcheck.yml  (vendored)
@@ -20,7 +20,7 @@ jobs:
          generate report: true

      - name: Upload report
        uses: actions/upload-artifact@v1
        uses: actions/upload-artifact@master
        with:
          name: ton-ccpcheck-report
          path: output
@@ -127,11 +127,17 @@ elseif (WIN32)
    endif()
    string(APPEND _PLATFORM_PATH "/$$CONFIG$$")

    message(STATUS "MSVC_VERSION ${MSVC_VERSION}")
    if (MSVC_VERSION LESS 1900)
        math(EXPR _VS_VERSION "${MSVC_VERSION} / 10 - 60")
    else()
        if (MSVC_VERSION EQUAL 1941)
            math(EXPR _VS_VERSION "${MSVC_VERSION} / 10 - 51")
        else()
            math(EXPR _VS_VERSION "${MSVC_VERSION} / 10 - 50")
        endif()

    endif()
    string(APPEND _PLATFORM_PATH "/v${_VS_VERSION}")

    if (SODIUM_USE_STATIC_LIBS)
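For reference, the toolset-folder arithmetic above can be checked with a small standalone sketch (a hypothetical helper, not part of the build): MSVC_VERSION 1941 would otherwise map to 194 - 50 = 144, so it is special-cased to keep the expected v143 platform path.

    // Hypothetical C++ restatement of the CMake mapping, for checking the arithmetic only.
    #include <cassert>

    int vs_platform_folder(int msvc_version) {
      if (msvc_version < 1900) return msvc_version / 10 - 60;   // e.g. 1800 -> 120 (v120)
      if (msvc_version == 1941) return msvc_version / 10 - 51;  // 194 - 51 = 143 (v143)
      return msvc_version / 10 - 50;                            // e.g. 1930 -> 143 (v143)
    }

    int main() {
      assert(vs_platform_folder(1941) == 143);  // without the special case it would be 144
      assert(vs_platform_folder(1930) == 143);
    }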
@@ -79,7 +79,7 @@ else()
  set(HAVE_SSE42 FALSE)
endif()

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD 20)
set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
set(CMAKE_CXX_EXTENSIONS FALSE)
@@ -333,6 +333,10 @@ add_cxx_compiler_flag("-Wno-sign-conversion")
add_cxx_compiler_flag("-Qunused-arguments")
add_cxx_compiler_flag("-Wno-unused-private-field")
add_cxx_compiler_flag("-Wno-redundant-move")

#add_cxx_compiler_flag("-Wno-unused-function")
#add_cxx_compiler_flag("-Wno-unused-variable")
#add_cxx_compiler_flag("-Wno-shorten-64-to-32")
#add_cxx_compiler_flag("-Werror")

#set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -isystem /usr/include/c++/v1")
@@ -484,10 +488,10 @@ target_link_libraries(test-net PRIVATE tdnet tdutils ${CMAKE_THREAD_LIBS_INIT})

#BEGIN tonlib
add_executable(test-tonlib ${TONLIB_ONLINE_TEST_SOURCE})
target_link_libraries(test-tonlib tdutils tdactor adnllite tl_api ton_crypto ton_block tl_tonlib_api tonlib)
target_link_libraries(test-tonlib tdactor adnllite tl_api ton_crypto tl_tonlib_api tonlib)

add_executable(test-tonlib-offline test/test-td-main.cpp ${TONLIB_OFFLINE_TEST_SOURCE})
target_link_libraries(test-tonlib-offline tdutils tdactor adnllite tl_api ton_crypto ton_block fift-lib tl_tonlib_api tonlib)
target_link_libraries(test-tonlib-offline tdactor adnllite tl_api ton_crypto fift-lib tl_tonlib_api tonlib)

if (NOT CMAKE_CROSSCOMPILING)
  add_dependencies(test-tonlib-offline gen_fif)
@@ -529,6 +533,8 @@ target_link_libraries(test-rldp2 adnl adnltest dht rldp2 tl_api)
add_executable(test-validator-session-state test/test-validator-session-state.cpp)
target_link_libraries(test-validator-session-state adnl dht rldp validatorsession tl_api)

add_executable(test-overlay test/test-overlay.cpp)
target_link_libraries(test-overlay overlay tdutils tdactor adnl adnltest tl_api dht)
add_executable(test-catchain test/test-catchain.cpp)
target_link_libraries(test-catchain overlay tdutils tdactor adnl adnltest rldp tl_api dht
                      catchain)
16  Changelog.md
@@ -1,3 +1,19 @@
## 2024.10 Update

1. Parallel writes to celldb: substantial improvement of sync and GC speed, especially on slow disks.
2. Decreased network traffic: only the first block candidate is sent optimistically.
3. Improved channel creation and DHT lookups; introduction of semi-private overlays.
4. New LS (liteserver) methods related to dispatch queues, and improved security.
5. Fixed recursion in TVM continuations.
6. Improved stats for actors, validator sessions, perf counters, overlays, ADNL, and RocksDB.
7. Migration to C++20.
8. Improved block size estimates: account for depth in various structures.
9. Fixed a bug with the `<<` optimization in FunC.
10. Minor TVM changes, to be activated by `Config8.version >= 9`.
11. Multiple minor improvements.

Besides the work of the core team, this update is based on the efforts of @krigga (emulator), Arayz @ TonBit (LS security, TVM recursion), and @ret2happy (UB in BLST).

## 2024.08 Update

1. Introduction of dispatch queues, message envelopes with transaction chain metadata, and explicitly stored msg_queue size, which will be activated by `Config8.version >= 8` and new `Config8.capabilities` bits: `capStoreOutMsgQueueSize`, `capMsgMetadata`, `capDeferMessages`.
@ -88,17 +88,15 @@ target_link_libraries(adnl PUBLIC tdactor ton_crypto tl_api tdnet tddb keys keyr
|
|||
|
||||
add_executable(adnl-proxy ${ADNL_PROXY_SOURCE})
|
||||
target_include_directories(adnl-proxy PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(adnl-proxy PUBLIC tdactor ton_crypto tl_api tdnet common
|
||||
tl-utils git)
|
||||
target_link_libraries(adnl-proxy PUBLIC tdactor ton_crypto tl_api tdnet common tl-utils git)
|
||||
|
||||
add_executable(adnl-pong adnl-pong.cpp)
|
||||
target_include_directories(adnl-pong PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(adnl-pong PUBLIC tdactor ton_crypto tl_api tdnet common
|
||||
tl-utils adnl dht git)
|
||||
target_link_libraries(adnl-pong PUBLIC tdactor ton_crypto tl_api tdnet common tl-utils adnl dht git)
|
||||
|
||||
add_library(adnltest STATIC ${ADNL_TEST_SOURCE})
|
||||
target_include_directories(adnltest PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(adnltest PUBLIC adnl )
|
||||
target_link_libraries(adnltest PUBLIC adnl)
|
||||
|
||||
install(TARGETS adnl-proxy RUNTIME DESTINATION bin)
|
||||
endif()
|
||||
|
|
|
@ -306,7 +306,7 @@ void AdnlLocalId::update_packet(AdnlPacket packet, bool update_id, bool sign, td
|
|||
}
|
||||
}
|
||||
|
||||
void AdnlLocalId::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_localId>> promise) {
|
||||
void AdnlLocalId::get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats_localId>> promise) {
|
||||
auto stats = create_tl_object<ton_api::adnl_stats_localId>();
|
||||
stats->short_id_ = short_id_.bits256_value();
|
||||
for (auto &[ip, x] : inbound_rate_limiter_) {
|
||||
|
@ -317,7 +317,7 @@ void AdnlLocalId::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_localI
|
|||
}
|
||||
prepare_packet_stats();
|
||||
stats->packets_recent_ = packet_stats_prev_.tl();
|
||||
stats->packets_total_ = packet_stats_total_.tl();
|
||||
stats->packets_total_ = packet_stats_total_.tl(all);
|
||||
stats->packets_total_->ts_start_ = (double)Adnl::adnl_start_time();
|
||||
stats->packets_total_->ts_end_ = td::Clocks::system();
|
||||
promise.set_result(std::move(stats));
|
||||
|
@@ -325,14 +325,14 @@ void AdnlLocalId::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_localI

void AdnlLocalId::add_decrypted_packet_stats(td::IPAddress addr) {
  prepare_packet_stats();
  ++packet_stats_cur_.decrypted_packets[addr];
  ++packet_stats_total_.decrypted_packets[addr];
  packet_stats_cur_.decrypted_packets[addr].inc();
  packet_stats_total_.decrypted_packets[addr].inc();
}

void AdnlLocalId::add_dropped_packet_stats(td::IPAddress addr) {
  prepare_packet_stats();
  ++packet_stats_cur_.dropped_packets[addr];
  ++packet_stats_total_.dropped_packets[addr];
  packet_stats_cur_.dropped_packets[addr].inc();
  packet_stats_total_.dropped_packets[addr].inc();
}

void AdnlLocalId::prepare_packet_stats() {
@@ -351,17 +351,22 @@ void AdnlLocalId::prepare_packet_stats() {
  }
}

tl_object_ptr<ton_api::adnl_stats_localIdPackets> AdnlLocalId::PacketStats::tl() const {
tl_object_ptr<ton_api::adnl_stats_localIdPackets> AdnlLocalId::PacketStats::tl(bool all) const {
  double threshold = all ? -1.0 : td::Clocks::system() - 600.0;
  auto obj = create_tl_object<ton_api::adnl_stats_localIdPackets>();
  obj->ts_start_ = ts_start;
  obj->ts_end_ = ts_end;
  for (const auto &[ip, packets] : decrypted_packets) {
    if (packets.last_packet_ts >= threshold) {
      obj->decrypted_packets_.push_back(create_tl_object<ton_api::adnl_stats_ipPackets>(
          ip.is_valid() ? PSTRING() << ip.get_ip_str() << ":" << ip.get_port() : "", packets));
          ip.is_valid() ? PSTRING() << ip.get_ip_str() << ":" << ip.get_port() : "", packets.packets));
    }
  }
  for (const auto &[ip, packets] : dropped_packets) {
    if (packets.last_packet_ts >= threshold) {
      obj->dropped_packets_.push_back(create_tl_object<ton_api::adnl_stats_ipPackets>(
          ip.is_valid() ? PSTRING() << ip.get_ip_str() << ":" << ip.get_port() : "", packets));
          ip.is_valid() ? PSTRING() << ip.get_ip_str() << ":" << ip.get_port() : "", packets.packets));
    }
  }
  return obj;
}
|
@ -78,7 +78,7 @@ class AdnlLocalId : public td::actor::Actor {
|
|||
void update_packet(AdnlPacket packet, bool update_id, bool sign, td::int32 update_addr_list_if,
|
||||
td::int32 update_priority_addr_list_if, td::Promise<AdnlPacket> promise);
|
||||
|
||||
void get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_localId>> promise);
|
||||
void get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats_localId>> promise);
|
||||
|
||||
td::uint32 get_mode() {
|
||||
return mode_;
|
||||
|
@@ -111,10 +111,20 @@ class AdnlLocalId : public td::actor::Actor {
  std::map<td::IPAddress, InboundRateLimiter> inbound_rate_limiter_;
  struct PacketStats {
    double ts_start = 0.0, ts_end = 0.0;
    std::map<td::IPAddress, td::uint64> decrypted_packets;
    std::map<td::IPAddress, td::uint64> dropped_packets;

    tl_object_ptr<ton_api::adnl_stats_localIdPackets> tl() const;
    struct Counter {
      td::uint64 packets = 0;
      double last_packet_ts = 0.0;

      void inc() {
        ++packets;
        last_packet_ts = td::Clocks::system();
      }
    };
    std::map<td::IPAddress, Counter> decrypted_packets;
    std::map<td::IPAddress, Counter> dropped_packets;

    tl_object_ptr<ton_api::adnl_stats_localIdPackets> tl(bool all = true) const;
  } packet_stats_cur_, packet_stats_prev_, packet_stats_total_;
  void add_decrypted_packet_stats(td::IPAddress addr);
  void add_dropped_packet_stats(td::IPAddress addr);
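A minimal self-contained sketch (with assumed stand-in names, not the TON sources) of why each per-IP entry now carries a timestamp: Counter::inc() stamps the last activity, and the reporting side can then skip entries that have been idle for more than 600 seconds unless a full dump is requested, mirroring PacketStats::tl(bool all) above.

    #include <cstdint>
    #include <ctime>
    #include <map>
    #include <string>

    // Stand-in for td::Clocks::system(), illustration only.
    static double now_seconds() { return (double)std::time(nullptr); }

    struct Counter {
      uint64_t packets = 0;
      double last_packet_ts = 0.0;
      void inc() { ++packets; last_packet_ts = now_seconds(); }
    };

    // Report only entries active within the last 600 s, unless `all` is set.
    std::map<std::string, uint64_t> report(const std::map<std::string, Counter>& per_ip, bool all) {
      double threshold = all ? -1.0 : now_seconds() - 600.0;
      std::map<std::string, uint64_t> out;
      for (const auto& [ip, c] : per_ip) {
        if (c.last_packet_ts >= threshold) {
          out[ip] = c.packets;
        }
      }
      return out;
    }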
@ -385,7 +385,7 @@ void AdnlPeerTableImpl::get_conn_ip_str(AdnlNodeIdShort l_id, AdnlNodeIdShort p_
|
|||
td::actor::send_closure(it->second, &AdnlPeer::get_conn_ip_str, l_id, std::move(promise));
|
||||
}
|
||||
|
||||
void AdnlPeerTableImpl::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) {
|
||||
void AdnlPeerTableImpl::get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) {
|
||||
class Cb : public td::actor::Actor {
|
||||
public:
|
||||
explicit Cb(td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) : promise_(std::move(promise)) {
|
||||
|
@ -440,7 +440,7 @@ void AdnlPeerTableImpl::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats>
|
|||
|
||||
for (auto &[id, local_id] : local_ids_) {
|
||||
td::actor::send_closure(callback, &Cb::inc_pending);
|
||||
td::actor::send_closure(local_id.local_id, &AdnlLocalId::get_stats,
|
||||
td::actor::send_closure(local_id.local_id, &AdnlLocalId::get_stats, all,
|
||||
[id = id, callback](td::Result<tl_object_ptr<ton_api::adnl_stats_localId>> R) {
|
||||
if (R.is_error()) {
|
||||
VLOG(ADNL_NOTICE)
|
||||
|
@ -454,7 +454,7 @@ void AdnlPeerTableImpl::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats>
|
|||
for (auto &[id, peer] : peers_) {
|
||||
td::actor::send_closure(callback, &Cb::inc_pending);
|
||||
td::actor::send_closure(
|
||||
peer, &AdnlPeer::get_stats,
|
||||
peer, &AdnlPeer::get_stats, all,
|
||||
[id = id, callback](td::Result<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> R) {
|
||||
if (R.is_error()) {
|
||||
VLOG(ADNL_NOTICE) << "failed to get stats for peer " << id << " : " << R.move_as_error();
|
||||
|
|
|
@ -108,7 +108,7 @@ class AdnlPeerTableImpl : public AdnlPeerTable {
|
|||
td::Promise<std::pair<td::actor::ActorOwn<AdnlTunnel>, AdnlAddress>> promise) override;
|
||||
void get_conn_ip_str(AdnlNodeIdShort l_id, AdnlNodeIdShort p_id, td::Promise<td::string> promise) override;
|
||||
|
||||
void get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) override;
|
||||
void get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) override;
|
||||
|
||||
struct PrintId {};
|
||||
PrintId print_id() const {
|
||||
|
|
|
@@ -808,7 +808,15 @@ void AdnlPeerPairImpl::get_conn_ip_str(td::Promise<td::string> promise) {
  promise.set_value("undefined");
}

void AdnlPeerPairImpl::get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_peerPair>> promise) {
void AdnlPeerPairImpl::get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats_peerPair>> promise) {
  if (!all) {
    double threshold = td::Clocks::system() - 600.0;
    if (last_in_packet_ts_ < threshold && last_out_packet_ts_ < threshold) {
      promise.set_value(nullptr);
      return;
    }
  }

  auto stats = create_tl_object<ton_api::adnl_stats_peerPair>();
  stats->local_id_ = local_id_.bits256_value();
  stats->peer_id_ = peer_id_short_.bits256_value();
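The early exit above is what keeps the default (non-`all`) stats dump small: idle peer pairs answer with a null object, and the aggregating side simply drops nulls, as the got_peer_pair_stats handler further down in this file does. A minimal sketch of that pattern, with hypothetical types:

    #include <memory>
    #include <vector>

    struct PeerPairStats { /* fields omitted for the sketch */ };

    // Aggregator side: collect per-peer-pair stats, skipping peers that reported nothing.
    void got_peer_pair_stats(std::vector<std::unique_ptr<PeerPairStats>>& result,
                             std::unique_ptr<PeerPairStats> peer_pair) {
      if (peer_pair) {  // idle pairs return nullptr when all == false
        result.push_back(std::move(peer_pair));
      }
    }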
@ -993,7 +1001,7 @@ void AdnlPeerImpl::update_addr_list(AdnlNodeIdShort local_id, td::uint32 local_m
|
|||
td::actor::send_closure(it->second, &AdnlPeerPair::update_addr_list, std::move(addr_list));
|
||||
}
|
||||
|
||||
void AdnlPeerImpl::get_stats(td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise) {
|
||||
void AdnlPeerImpl::get_stats(bool all, td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise) {
|
||||
class Cb : public td::actor::Actor {
|
||||
public:
|
||||
explicit Cb(td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise)
|
||||
|
@ -1001,7 +1009,9 @@ void AdnlPeerImpl::get_stats(td::Promise<std::vector<tl_object_ptr<ton_api::adnl
|
|||
}
|
||||
|
||||
void got_peer_pair_stats(tl_object_ptr<ton_api::adnl_stats_peerPair> peer_pair) {
|
||||
if (peer_pair) {
|
||||
result_.push_back(std::move(peer_pair));
|
||||
}
|
||||
dec_pending();
|
||||
}
|
||||
|
||||
|
@ -1027,7 +1037,7 @@ void AdnlPeerImpl::get_stats(td::Promise<std::vector<tl_object_ptr<ton_api::adnl
|
|||
|
||||
for (auto &[local_id, peer_pair] : peer_pairs_) {
|
||||
td::actor::send_closure(callback, &Cb::inc_pending);
|
||||
td::actor::send_closure(peer_pair, &AdnlPeerPair::get_stats,
|
||||
td::actor::send_closure(peer_pair, &AdnlPeerPair::get_stats, all,
|
||||
[local_id = local_id, peer_id = peer_id_short_,
|
||||
callback](td::Result<tl_object_ptr<ton_api::adnl_stats_peerPair>> R) {
|
||||
if (R.is_error()) {
|
||||
|
|
|
@ -59,7 +59,7 @@ class AdnlPeerPair : public td::actor::Actor {
|
|||
virtual void update_peer_id(AdnlNodeIdFull id) = 0;
|
||||
virtual void update_addr_list(AdnlAddressList addr_list) = 0;
|
||||
virtual void get_conn_ip_str(td::Promise<td::string> promise) = 0;
|
||||
virtual void get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_peerPair>> promise) = 0;
|
||||
virtual void get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats_peerPair>> promise) = 0;
|
||||
|
||||
static td::actor::ActorOwn<AdnlPeerPair> create(td::actor::ActorId<AdnlNetworkManager> network_manager,
|
||||
td::actor::ActorId<AdnlPeerTable> peer_table, td::uint32 local_mode,
|
||||
|
@ -101,7 +101,7 @@ class AdnlPeer : public td::actor::Actor {
|
|||
td::actor::ActorId<AdnlLocalId> local_actor, AdnlAddressList addr_list) = 0;
|
||||
virtual void update_dht_node(td::actor::ActorId<dht::Dht> dht_node) = 0;
|
||||
virtual void get_conn_ip_str(AdnlNodeIdShort l_id, td::Promise<td::string> promise) = 0;
|
||||
virtual void get_stats(td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise) = 0;
|
||||
virtual void get_stats(bool all, td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise) = 0;
|
||||
};
|
||||
|
||||
} // namespace adnl
|
||||
|
|
|
@ -90,7 +90,7 @@ class AdnlPeerPairImpl : public AdnlPeerPair {
|
|||
void update_peer_id(AdnlNodeIdFull id) override;
|
||||
|
||||
void get_conn_ip_str(td::Promise<td::string> promise) override;
|
||||
void get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats_peerPair>> promise) override;
|
||||
void get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats_peerPair>> promise) override;
|
||||
|
||||
void got_data_from_db(td::Result<AdnlDbItem> R);
|
||||
void got_data_from_static_nodes(td::Result<AdnlNode> R);
|
||||
|
@ -302,7 +302,7 @@ class AdnlPeerImpl : public AdnlPeer {
|
|||
AdnlAddressList addr_list) override;
|
||||
void update_dht_node(td::actor::ActorId<dht::Dht> dht_node) override;
|
||||
void get_conn_ip_str(AdnlNodeIdShort l_id, td::Promise<td::string> promise) override;
|
||||
void get_stats(td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise) override;
|
||||
void get_stats(bool all, td::Promise<std::vector<tl_object_ptr<ton_api::adnl_stats_peerPair>>> promise) override;
|
||||
//void check_signature(td::BufferSlice data, td::BufferSlice signature, td::Promise<td::Unit> promise) override;
|
||||
|
||||
AdnlPeerImpl(td::actor::ActorId<AdnlNetworkManager> network_manager, td::actor::ActorId<AdnlPeerTable> peer_table,
|
||||
|
|
|
@@ -25,7 +25,7 @@ namespace ton {
namespace adnl {

void AdnlQuery::alarm() {
  set_error(td::Status::Error(ErrorCode::timeout, "adnl query timeout"));
  set_error(td::Status::Error(ErrorCode::timeout, PSTRING() << "timeout for adnl query " << name_));
}
void AdnlQuery::result(td::BufferSlice data) {
  promise_.set_value(std::move(data));
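The change embeds the query name in the timeout error, so a log line can be attributed to a concrete query rather than the generic "adnl query timeout". A hedged sketch of the same idea (the helper name is hypothetical; PSTRING() is the TD string-builder macro used throughout the codebase):

    // Hypothetical helper, for illustration only.
    td::Status make_query_timeout(td::Slice query_name) {
      return td::Status::Error(ton::ErrorCode::timeout,
                               PSTRING() << "timeout for adnl query " << query_name);
    }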
|
|
@ -121,7 +121,7 @@ class Adnl : public AdnlSenderInterface {
|
|||
virtual void create_tunnel(AdnlNodeIdShort dst, td::uint32 size,
|
||||
td::Promise<std::pair<td::actor::ActorOwn<AdnlTunnel>, AdnlAddress>> promise) = 0;
|
||||
|
||||
virtual void get_stats(td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) = 0;
|
||||
virtual void get_stats(bool all, td::Promise<tl_object_ptr<ton_api::adnl_stats>> promise) = 0;
|
||||
|
||||
static td::actor::ActorOwn<Adnl> create(std::string db, td::actor::ActorId<keyring::Keyring> keyring);
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ else
|
|||
fi
|
||||
|
||||
export NONINTERACTIVE=1
|
||||
brew install ninja pkg-config automake libtool autoconf
|
||||
brew install ninja pkg-config automake libtool autoconf texinfo
|
||||
brew install llvm@16
|
||||
|
||||
|
||||
|
@ -88,7 +88,7 @@ if [ ! -d "openssl_3" ]; then
|
|||
cd openssl_3
|
||||
opensslPath=`pwd`
|
||||
git checkout openssl-3.1.4
|
||||
./config -static
|
||||
./config
|
||||
make build_libs -j12
|
||||
test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; }
|
||||
cd ..
|
||||
|
@ -168,23 +168,23 @@ else
|
|||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
fi
|
||||
|
||||
strip storage/storage-daemon/storage-daemon
|
||||
strip storage/storage-daemon/storage-daemon-cli
|
||||
strip blockchain-explorer/blockchain-explorer
|
||||
strip crypto/fift
|
||||
strip crypto/func
|
||||
strip crypto/create-state
|
||||
strip crypto/tlbc
|
||||
strip validator-engine-console/validator-engine-console
|
||||
strip tonlib/tonlib-cli
|
||||
strip http/http-proxy
|
||||
strip rldp-http-proxy/rldp-http-proxy
|
||||
strip dht-server/dht-server
|
||||
strip lite-client/lite-client
|
||||
strip validator-engine/validator-engine
|
||||
strip utils/generate-random-id
|
||||
strip utils/json2tlo
|
||||
strip adnl/adnl-proxy
|
||||
strip -s storage/storage-daemon/storage-daemon
|
||||
strip -s storage/storage-daemon/storage-daemon-cli
|
||||
strip -s blockchain-explorer/blockchain-explorer
|
||||
strip -s crypto/fift
|
||||
strip -s crypto/func
|
||||
strip -s crypto/create-state
|
||||
strip -s crypto/tlbc
|
||||
strip -s validator-engine-console/validator-engine-console
|
||||
strip -s tonlib/tonlib-cli
|
||||
strip -s http/http-proxy
|
||||
strip -s rldp-http-proxy/rldp-http-proxy
|
||||
strip -s dht-server/dht-server
|
||||
strip -s lite-client/lite-client
|
||||
strip -s validator-engine/validator-engine
|
||||
strip -s utils/generate-random-id
|
||||
strip -s utils/json2tlo
|
||||
strip -s adnl/adnl-proxy
|
||||
|
||||
cd ..
|
||||
|
||||
|
@ -192,8 +192,6 @@ if [ "$with_artifacts" = true ]; then
|
|||
echo Creating artifacts...
|
||||
rm -rf artifacts
|
||||
mkdir artifacts
|
||||
cp crypto/fift/lib artifacts/
|
||||
cp -R crypto/smartcont/ artifacts/
|
||||
cp build/storage/storage-daemon/storage-daemon artifacts/
|
||||
cp build/storage/storage-daemon/storage-daemon-cli artifacts/
|
||||
cp build/blockchain-explorer/blockchain-explorer artifacts/
|
||||
|
@ -213,9 +211,9 @@ if [ "$with_artifacts" = true ]; then
|
|||
cp build/utils/json2tlo artifacts/
|
||||
cp build/adnl/adnl-proxy artifacts/
|
||||
cp build/emulator/libemulator.dylib artifacts/
|
||||
chmod +x artifacts/*
|
||||
rsync -r crypto/smartcont artifacts/
|
||||
rsync -r crypto/fift/lib artifacts/
|
||||
chmod -R +x artifacts/*
|
||||
fi
|
||||
|
||||
if [ "$with_tests" = true ]; then
|
||||
|
|
|
@ -97,23 +97,23 @@ else
|
|||
fi
|
||||
|
||||
|
||||
strip storage/storage-daemon/storage-daemon
|
||||
strip storage/storage-daemon/storage-daemon-cli
|
||||
strip blockchain-explorer/blockchain-explorer
|
||||
strip crypto/fift
|
||||
strip crypto/func
|
||||
strip crypto/create-state
|
||||
strip crypto/tlbc
|
||||
strip validator-engine-console/validator-engine-console
|
||||
strip tonlib/tonlib-cli
|
||||
strip http/http-proxy
|
||||
strip rldp-http-proxy/rldp-http-proxy
|
||||
strip dht-server/dht-server
|
||||
strip lite-client/lite-client
|
||||
strip validator-engine/validator-engine
|
||||
strip utils/generate-random-id
|
||||
strip utils/json2tlo
|
||||
strip adnl/adnl-proxy
|
||||
strip -s storage/storage-daemon/storage-daemon
|
||||
strip -s storage/storage-daemon/storage-daemon-cli
|
||||
strip -s blockchain-explorer/blockchain-explorer
|
||||
strip -s crypto/fift
|
||||
strip -s crypto/func
|
||||
strip -s crypto/create-state
|
||||
strip -s crypto/tlbc
|
||||
strip -s validator-engine-console/validator-engine-console
|
||||
strip -s tonlib/tonlib-cli
|
||||
strip -s http/http-proxy
|
||||
strip -s rldp-http-proxy/rldp-http-proxy
|
||||
strip -s dht-server/dht-server
|
||||
strip -s lite-client/lite-client
|
||||
strip -s validator-engine/validator-engine
|
||||
strip -s utils/generate-random-id
|
||||
strip -s utils/json2tlo
|
||||
strip -s adnl/adnl-proxy
|
||||
|
||||
cd ..
|
||||
|
||||
|
@ -140,9 +140,9 @@ if [ "$with_artifacts" = true ]; then
|
|||
cp build/utils/json2tlo artifacts/
|
||||
cp build/adnl/adnl-proxy artifacts/
|
||||
cp build/emulator/libemulator.dylib artifacts/
|
||||
chmod +x artifacts/*
|
||||
rsync -r crypto/smartcont artifacts/
|
||||
rsync -r crypto/fift/lib artifacts/
|
||||
cp -R crypto/smartcont artifacts/
|
||||
cp -R crypto/fift/lib artifacts/
|
||||
chmod -R +x artifacts/*
|
||||
fi
|
||||
|
||||
if [ "$with_tests" = true ]; then
|
||||
|
|
|
@ -81,7 +81,7 @@ if [ ! -d "openssl_3" ]; then
|
|||
cd openssl_3
|
||||
opensslPath=`pwd`
|
||||
git checkout openssl-3.1.4
|
||||
./config -static
|
||||
./config
|
||||
make build_libs -j12
|
||||
test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; }
|
||||
cd ..
|
||||
|
@ -160,7 +160,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
|||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
fi
|
||||
|
||||
strip -g storage/storage-daemon/storage-daemon \
|
||||
strip -s storage/storage-daemon/storage-daemon \
|
||||
storage/storage-daemon/storage-daemon-cli \
|
||||
blockchain-explorer/blockchain-explorer \
|
||||
crypto/fift \
|
||||
|
@ -193,8 +193,6 @@ cd ..
|
|||
if [ "$with_artifacts" = true ]; then
|
||||
rm -rf artifacts
|
||||
mkdir artifacts
|
||||
cp crypto/fift/lib artifacts/
|
||||
cp -R crypto/smartcont/ artifacts/
|
||||
mv build/tonlib/libtonlibjson.so.0.5 build/tonlib/libtonlibjson.so
|
||||
cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli \
|
||||
build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/blockchain-explorer/blockchain-explorer \
|
||||
|
@ -204,9 +202,9 @@ if [ "$with_artifacts" = true ]; then
|
|||
build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.so \
|
||||
artifacts
|
||||
test $? -eq 0 || { echo "Can't copy final binaries"; exit 1; }
|
||||
chmod +x artifacts/*
|
||||
cp -R crypto/smartcont artifacts
|
||||
cp -R crypto/fift/lib artifacts
|
||||
chmod -R +x artifacts/*
|
||||
fi
|
||||
|
||||
if [ "$with_tests" = true ]; then
|
||||
|
|
|
@ -68,7 +68,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
|||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
fi
|
||||
|
||||
strip -g storage/storage-daemon/storage-daemon \
|
||||
strip -s storage/storage-daemon/storage-daemon \
|
||||
storage/storage-daemon/storage-daemon-cli \
|
||||
blockchain-explorer/blockchain-explorer \
|
||||
crypto/fift \
|
||||
|
@ -112,9 +112,9 @@ if [ "$with_artifacts" = true ]; then
|
|||
build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.so \
|
||||
artifacts
|
||||
test $? -eq 0 || { echo "Can't copy final binaries"; exit 1; }
|
||||
chmod +x artifacts/*
|
||||
cp -R crypto/smartcont artifacts
|
||||
cp -R crypto/fift/lib artifacts
|
||||
chmod -R +x artifacts/*
|
||||
fi
|
||||
|
||||
if [ "$with_tests" = true ]; then
|
||||
|
|
|
@ -216,6 +216,6 @@ build\validator-engine\validator-engine.exe ^
|
|||
build\utils\generate-random-id.exe ^
|
||||
build\utils\json2tlo.exe ^
|
||||
build\adnl\adnl-proxy.exe ^
|
||||
build\emulator\emulator.dll) do (strip -g %%I & copy %%I artifacts\)
|
||||
build\emulator\emulator.dll) do (strip -s %%I & copy %%I artifacts\)
|
||||
xcopy /e /k /h /i crypto\smartcont artifacts\smartcont
|
||||
xcopy /e /k /h /i crypto\fift\lib artifacts\lib
|
||||
|
|
|
@ -217,6 +217,6 @@ build\validator-engine\validator-engine.exe ^
|
|||
build\utils\generate-random-id.exe ^
|
||||
build\utils\json2tlo.exe ^
|
||||
build\adnl\adnl-proxy.exe ^
|
||||
build\emulator\emulator.dll) do (strip -g %%I & copy %%I artifacts\)
|
||||
build\emulator\emulator.dll) do (strip -s %%I & copy %%I artifacts\)
|
||||
xcopy /e /k /h /i crypto\smartcont artifacts\smartcont
|
||||
xcopy /e /k /h /i crypto\fift\lib artifacts\lib
|
||||
|
|
|
@ -35,3 +35,23 @@ cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so
|
|||
cp ./result/lib/libemulator.so artifacts/
|
||||
cp ./result/lib/fift/* artifacts/lib/
|
||||
cp -r ./result/share/ton/smartcont artifacts/
|
||||
chmod -R +x artifacts
|
||||
cd artifacts
|
||||
sudo strip -s storage-daemon \
|
||||
storage-daemon-cli \
|
||||
blockchain-explorer \
|
||||
fift \
|
||||
tlbc \
|
||||
func \
|
||||
create-state \
|
||||
validator-engine-console \
|
||||
tonlib-cli \
|
||||
http-proxy \
|
||||
rldp-http-proxy \
|
||||
dht-server \
|
||||
lite-client \
|
||||
validator-engine \
|
||||
generate-random-id \
|
||||
adnl-proxy \
|
||||
libemulator.so \
|
||||
libtonlibjson.so
|
||||
|
|
|
@ -35,3 +35,23 @@ cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so
|
|||
cp ./result/lib/libemulator.so artifacts/
|
||||
cp ./result/lib/fift/* artifacts/lib/
|
||||
cp -r ./result/share/ton/smartcont artifacts/
|
||||
chmod -R +x artifacts
|
||||
cd artifacts
|
||||
sudo strip -s storage-daemon \
|
||||
storage-daemon-cli \
|
||||
blockchain-explorer \
|
||||
fift \
|
||||
tlbc \
|
||||
func \
|
||||
create-state \
|
||||
validator-engine-console \
|
||||
tonlib-cli \
|
||||
http-proxy \
|
||||
rldp-http-proxy \
|
||||
dht-server \
|
||||
lite-client \
|
||||
validator-engine \
|
||||
generate-random-id \
|
||||
adnl-proxy \
|
||||
libemulator.so \
|
||||
libtonlibjson.so
|
||||
|
|
|
@ -35,3 +35,23 @@ cp ./result/lib/libtonlibjson.dylib artifacts/
|
|||
cp ./result/lib/libemulator.dylib artifacts/
|
||||
cp ./result/lib/fift/* artifacts/lib/
|
||||
cp -r ./result/share/ton/smartcont artifacts/
|
||||
chmod -R +x artifacts
|
||||
cd artifacts
|
||||
sudo strip -xSX storage-daemon \
|
||||
storage-daemon-cli \
|
||||
blockchain-explorer \
|
||||
fift \
|
||||
tlbc \
|
||||
func \
|
||||
create-state \
|
||||
validator-engine-console \
|
||||
tonlib-cli \
|
||||
http-proxy \
|
||||
rldp-http-proxy \
|
||||
dht-server \
|
||||
lite-client \
|
||||
validator-engine \
|
||||
generate-random-id \
|
||||
adnl-proxy \
|
||||
libemulator.dylib \
|
||||
libtonlibjson.dylib
|
||||
|
|
|
@ -42,10 +42,13 @@ pkgs.llvmPackages_16.stdenv.mkDerivation {
|
|||
];
|
||||
|
||||
dontAddStaticConfigureFlags = false;
|
||||
doCheck = false;
|
||||
doInstallCheck = false;
|
||||
|
||||
cmakeFlags = [
|
||||
"-DTON_USE_ABSEIL=OFF"
|
||||
"-DTON_USE_ABSEIL=ON"
|
||||
"-DNIX=ON"
|
||||
"-DTON_ONLY_TONLIB=ON"
|
||||
];
|
||||
|
||||
LDFLAGS = [
|
||||
|
|
|
@ -58,10 +58,13 @@ stdenv227.mkDerivation {
|
|||
];
|
||||
|
||||
dontAddStaticConfigureFlags = false;
|
||||
doCheck = false;
|
||||
doInstallCheck = false;
|
||||
|
||||
cmakeFlags = [
|
||||
"-DTON_USE_ABSEIL=OFF"
|
||||
"-DTON_USE_ABSEIL=ON"
|
||||
"-DNIX=ON"
|
||||
"-DTON_ONLY_TONLIB=ON"
|
||||
];
|
||||
|
||||
LDFLAGS = [
|
||||
|
|
|
@ -16,26 +16,26 @@ add_executable(blockchain-explorer ${BLOCHAIN_EXPLORER_SOURCE})
|
|||
if (NIX)
|
||||
if (MHD_FOUND)
|
||||
target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR})
|
||||
target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY})
|
||||
target_link_libraries(blockchain-explorer tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ${MHD_LIBRARY})
|
||||
else()
|
||||
find_package(PkgConfig REQUIRED)
|
||||
pkg_check_modules(MHD libmicrohttpd)
|
||||
target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR} ${MHD_STATIC_INCLUDE_DIRS})
|
||||
target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARIES} ${MHD_STATIC_LIBRARIES})
|
||||
target_link_libraries(blockchain-explorer tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ${MHD_LIBRARIES} ${MHD_STATIC_LIBRARIES})
|
||||
endif()
|
||||
else()
|
||||
if (MHD_FOUND)
|
||||
target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR})
|
||||
target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY})
|
||||
target_link_libraries(blockchain-explorer tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ${MHD_LIBRARY})
|
||||
else()
|
||||
find_package(MHD)
|
||||
target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR})
|
||||
target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY})
|
||||
target_link_libraries(blockchain-explorer tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ${MHD_LIBRARY})
|
||||
endif()
|
||||
endif()
|
||||
|
||||
target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR})
|
||||
target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY})
|
||||
target_link_libraries(blockchain-explorer tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ${MHD_LIBRARY})
|
||||
|
||||
install(TARGETS blockchain-explorer RUNTIME DESTINATION bin)
|
||||
|
||||
|
|
|
@@ -122,9 +122,8 @@ HttpAnswer& HttpAnswer::operator<<(MessageCell msg) {
        abort("cannot unpack internal message");
        return *this;
      }
      td::RefInt256 value;
      td::Ref<vm::Cell> extra;
      if (!block::unpack_CurrencyCollection(info.value, value, extra)) {
      block::CurrencyCollection currency_collection;
      if (!currency_collection.unpack(info.value)) {
        abort("cannot unpack message value");
        return *this;
      }

@@ -133,7 +132,7 @@ HttpAnswer& HttpAnswer::operator<<(MessageCell msg) {
            << "<tr><th>destination</th><td>" << AddressCell{info.dest} << "</td></tr>\n"
            << "<tr><th>lt</th><td>" << info.created_lt << "</td></tr>\n"
            << "<tr><th>time</th><td>" << info.created_at << " (" << time_to_human(info.created_at) << ")</td></tr>\n"
            << "<tr><th>value</th><td>" << value << "</td></tr>\n";
            << "<tr><th>value</th><td>" << currency_collection.to_str() << "</td></tr>\n";
      break;
    }
    default:
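The explorer now keeps the whole CurrencyCollection (grams plus extra currencies) instead of splitting it into a RefInt256 and a raw extra cell; to_str() then renders both parts. A usage sketch under the assumption that the TON block headers are available:

    // Sketch only: unpack a CurrencyCollection from a value slice and render it for display.
    std::string render_value(td::Ref<vm::CellSlice> value_csr) {
      block::CurrencyCollection currency_collection;
      if (!currency_collection.unpack(std::move(value_csr))) {
        return "<invalid value>";  // the slice does not hold a valid CurrencyCollection
      }
      return currency_collection.to_str();  // grams plus any extra currencies
    }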
@ -365,6 +364,7 @@ HttpAnswer& HttpAnswer::operator<<(AccountCell acc_c) {
|
|||
ton::LogicalTime last_trans_lt = 0;
|
||||
ton::Bits256 last_trans_hash;
|
||||
last_trans_hash.set_zero();
|
||||
block::CurrencyCollection balance = block::CurrencyCollection::zero();
|
||||
try {
|
||||
auto state_root = vm::MerkleProof::virtualize(acc_c.q_roots[1], 1);
|
||||
if (state_root.is_null()) {
|
||||
|
@ -397,6 +397,20 @@ HttpAnswer& HttpAnswer::operator<<(AccountCell acc_c) {
|
|||
}
|
||||
last_trans_hash = acc_info.last_trans_hash;
|
||||
last_trans_lt = acc_info.last_trans_lt;
|
||||
block::gen::Account::Record_account acc;
|
||||
block::gen::AccountStorage::Record storage_rec;
|
||||
if (!tlb::unpack_cell(acc_c.root, acc)) {
|
||||
abort("cannot unpack Account");
|
||||
return *this;
|
||||
}
|
||||
if (!tlb::csr_unpack(acc.storage, storage_rec)) {
|
||||
abort("cannot unpack AccountStorage");
|
||||
return *this;
|
||||
}
|
||||
if (!balance.unpack(storage_rec.balance)) {
|
||||
abort("cannot unpack account balance");
|
||||
return *this;
|
||||
}
|
||||
} else if (acc_c.root.not_null()) {
|
||||
abort(PSTRING() << "account state proof shows that account state for " << acc_c.addr.workchain << ":"
|
||||
<< acc_c.addr.addr.to_hex() << " must be empty, but it is not");
|
||||
|
@ -434,6 +448,7 @@ HttpAnswer& HttpAnswer::operator<<(AccountCell acc_c) {
|
|||
*this << "<tr><th>workchain</th><td>" << acc_c.addr.workchain << "</td></tr>";
|
||||
*this << "<tr><th>account hex</th><td>" << acc_c.addr.addr.to_hex() << "</td></tr>";
|
||||
*this << "<tr><th>account</th><td>" << acc_c.addr.rserialize(true) << "</td></tr>";
|
||||
*this << "<tr><th>balance</th><td>" << balance.to_str() << "</td></tr>";
|
||||
if (last_trans_lt > 0) {
|
||||
*this << "<tr><th>last transaction</th><td>"
|
||||
<< "<a href=\"" << TransactionLink{acc_c.addr, last_trans_lt, last_trans_hash} << "\">lt=" << last_trans_lt
|
||||
|
|
|
@ -34,6 +34,5 @@ target_include_directories(overlay PUBLIC
|
|||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>/..
|
||||
${OPENSSL_INCLUDE_DIR}
|
||||
)
|
||||
target_link_libraries(catchain PRIVATE tdutils tdactor adnl tl_api dht tdfec
|
||||
overlay)
|
||||
target_link_libraries(catchain PRIVATE tdutils tdactor adnl tl_api dht tdfec overlay)
|
||||
|
||||
|
|
|
@ -27,6 +27,8 @@
|
|||
|
||||
#include "catchain-receiver.hpp"
|
||||
|
||||
#include "td/utils/ThreadSafeCounter.h"
|
||||
|
||||
namespace ton {
|
||||
|
||||
namespace catchain {
|
||||
|
@@ -369,7 +371,7 @@ void CatChainReceiverImpl::add_block(td::BufferSlice payload, std::vector<CatCha

  int height = prev->height_ + 1;
  auto max_block_height = get_max_block_height(opts_, sources_.size());
  if (height > max_block_height) {
  if (td::narrow_cast<td::uint64>(height) > max_block_height) {
    VLOG(CATCHAIN_WARNING) << this << ": cannot create block: max height exceeded (" << max_block_height << ")";
    active_send_ = false;
    return;
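The fix avoids a signed/unsigned comparison between the int height and the td::uint64 limit; td::narrow_cast converts the value and checks that no information is lost. A small standalone sketch of the same pitfall, using standard-library types only (names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Comparing a signed height with an unsigned limit directly would promote the signed
    // value to unsigned, so a (bogus) negative height would compare as a huge number.
    bool exceeds_limit(int height, uint64_t max_height) {
      assert(height >= 0);  // narrow_cast-style check before converting
      return static_cast<uint64_t>(height) > max_height;
    }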
@ -685,6 +687,7 @@ void CatChainReceiverImpl::receive_query_from_overlay(adnl::AdnlNodeIdShort src,
|
|||
promise.set_error(td::Status::Error(ErrorCode::notready, "db not read"));
|
||||
return;
|
||||
}
|
||||
TD_PERF_COUNTER(catchain_query_process);
|
||||
td::PerfWarningTimer t{"catchain query process", 0.05};
|
||||
auto F = fetch_tl_object<ton_api::Function>(data.clone(), true);
|
||||
if (F.is_error()) {
|
||||
|
|
|
@@ -49,4 +49,29 @@ template <typename T>
void delay_action(T promise, td::Timestamp timeout) {
  DelayedAction<T>::create(std::move(promise), timeout);
}

template <typename PromiseT, typename ValueT>
class AsyncApply : public td::actor::Actor {
 public:
  AsyncApply(PromiseT promise, ValueT value) : promise_(std::move(promise)), value_(std::move(value)) {
  }

  void start_up() override {
    promise_(std::move(value_));
    stop();
  }

  static void create(td::Slice name, PromiseT promise, ValueT value) {
    td::actor::create_actor<AsyncApply>(PSLICE() << "async:" << name, std::move(promise), std::move(value)).release();
  }

 private:
  PromiseT promise_;
  ValueT value_;
};

template <class PromiseT, class ValueT>
void async_apply(td::Slice name, PromiseT &&promise, ValueT &&value) {
  AsyncApply<PromiseT, ValueT>::create(name, std::forward<PromiseT>(promise), std::forward<ValueT>(value));
}
} // namespace ton
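The new async_apply helper hands a value to a promise from a freshly spawned one-shot actor instead of invoking the callback inline, keeping a potentially heavy or re-entrant callback off the current actor's stack. A hedged usage sketch (the promise and value are made up for illustration, and the code is assumed to run inside the TD actor scheduler, as all actor code does):

    void deliver_later() {
      td::Promise<td::int32> promise = [](td::Result<td::int32> R) {
        // consume the result here
      };
      ton::async_apply("deliver-result", std::move(promise), td::int32{42});
      // The promise is invoked from AsyncApply::start_up() on a fresh one-shot actor,
      // not synchronously on the caller's stack.
    }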
|
|
@@ -19,6 +19,6 @@
namespace ton {

// See doc/GlobalVersions.md
const int SUPPORTED_VERSION = 8;
const int SUPPORTED_VERSION = 9;

}
|
|
@ -10,8 +10,7 @@ set(CREATE_HARDFORK_SOURCE
|
|||
)
|
||||
|
||||
add_executable(create-hardfork ${CREATE_HARDFORK_SOURCE})
|
||||
target_link_libraries(create-hardfork overlay tdutils tdactor adnl tl_api dht
|
||||
rldp catchain validatorsession full-node validator-hardfork ton_validator
|
||||
target_link_libraries(create-hardfork overlay tdutils tdactor adnl tl_api dht rldp catchain validatorsession full-node validator-hardfork ton_validator
|
||||
validator-hardfork fift-lib memprof git ${JEMALLOC_LIBRARIES})
|
||||
|
||||
install(TARGETS create-hardfork RUNTIME DESTINATION bin)
|
||||
|
|
|
@ -249,7 +249,7 @@ class HardforkCreator : public td::actor::Actor {
|
|||
void send_block_candidate(ton::BlockIdExt block_id, ton::CatchainSeqno cc_seqno, td::uint32 validator_set_hash,
|
||||
td::BufferSlice data) override {
|
||||
}
|
||||
void send_broadcast(ton::BlockBroadcast broadcast, bool custom_overlays_only) override {
|
||||
void send_broadcast(ton::BlockBroadcast broadcast, int mode) override {
|
||||
}
|
||||
void download_block(ton::BlockIdExt block_id, td::uint32 priority, td::Timestamp timeout,
|
||||
td::Promise<ton::ReceivedBlock> promise) override {
|
||||
|
|
|
@ -151,6 +151,7 @@ set(TON_DB_SOURCE
|
|||
vm/db/CellHashTable.h
|
||||
vm/db/CellStorage.h
|
||||
vm/db/TonDb.h
|
||||
vm/db/InMemoryBagOfCellsDb.cpp
|
||||
)
|
||||
|
||||
set(FIFT_SOURCE
|
||||
|
@ -300,9 +301,8 @@ endif()
|
|||
target_include_directories(ton_crypto_core SYSTEM PUBLIC $<BUILD_INTERFACE:${OPENSSL_INCLUDE_DIR}>)
|
||||
|
||||
add_library(ton_crypto STATIC ${TON_CRYPTO_SOURCE})
|
||||
target_include_directories(ton_crypto PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(ton_crypto PUBLIC ${OPENSSL_CRYPTO_LIBRARY} tdutils tddb_utils ton_crypto_core ton_block)
|
||||
target_include_directories(ton_crypto PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(ton_crypto PUBLIC ${OPENSSL_CRYPTO_LIBRARY} ton_crypto_core ton_block)
|
||||
if (USE_EMSCRIPTEN)
|
||||
target_link_options(ton_crypto PRIVATE -fexceptions)
|
||||
target_compile_options(ton_crypto PRIVATE -fexceptions)
|
||||
|
@ -344,6 +344,7 @@ else()
|
|||
target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY})
|
||||
endif()
|
||||
|
||||
target_link_libraries(ton_crypto_core PUBLIC ${SECP256K1_LIBRARY})
|
||||
target_include_directories(ton_crypto_core PUBLIC $<BUILD_INTERFACE:${SODIUM_INCLUDE_DIR}>)
|
||||
target_link_libraries(ton_crypto PUBLIC ${SODIUM_LIBRARY_RELEASE})
|
||||
|
||||
|
@ -358,7 +359,7 @@ target_link_libraries(test-ed25519-crypto PUBLIC ton_crypto)
|
|||
|
||||
add_library(fift-lib STATIC ${FIFT_SOURCE})
|
||||
target_include_directories(fift-lib PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
|
||||
target_link_libraries(fift-lib PUBLIC ton_crypto tdutils ton_block)
|
||||
target_link_libraries(fift-lib PUBLIC ton_crypto)
|
||||
|
||||
if (USE_EMSCRIPTEN)
|
||||
target_link_options(fift-lib PRIVATE -fexceptions)
|
||||
|
@ -379,7 +380,7 @@ target_link_libraries(src_parser PUBLIC ton_crypto_core)
|
|||
add_library(ton_block STATIC ${BLOCK_SOURCE})
|
||||
target_include_directories(ton_block PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/block> $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(ton_block PUBLIC ton_crypto tdutils tdactor tl_api)
|
||||
target_link_libraries(ton_block PUBLIC ton_crypto_core tdactor tl_api)
|
||||
if (USE_EMSCRIPTEN)
|
||||
target_link_options(ton_block PRIVATE -fexceptions)
|
||||
target_compile_options(ton_block PRIVATE -fexceptions)
|
||||
|
@ -387,7 +388,7 @@ endif()
|
|||
|
||||
add_executable(func func/func-main.cpp ${FUNC_LIB_SOURCE})
|
||||
target_include_directories(func PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
|
||||
target_link_libraries(func PUBLIC ton_crypto src_parser git ton_block)
|
||||
target_link_libraries(func PUBLIC ton_crypto src_parser git)
|
||||
if (WINGETOPT_FOUND)
|
||||
target_link_libraries_system(func wingetopt)
|
||||
endif()
|
||||
|
@ -416,23 +417,31 @@ endif()
|
|||
|
||||
add_executable(tlbc tl/tlbc.cpp)
|
||||
target_include_directories(tlbc PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
|
||||
target_link_libraries(tlbc PUBLIC ton_crypto_core src_parser)
|
||||
target_link_libraries(tlbc PUBLIC src_parser)
|
||||
if (WINGETOPT_FOUND)
|
||||
target_link_libraries_system(tlbc wingetopt)
|
||||
endif()
|
||||
|
||||
add_library(pow-miner-lib util/Miner.cpp util/Miner.h)
|
||||
target_include_directories(pow-miner-lib PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
|
||||
target_link_libraries(pow-miner-lib PUBLIC ton_crypto ton_block)
|
||||
target_link_libraries(pow-miner-lib PUBLIC ton_crypto)
|
||||
|
||||
add_executable(pow-miner util/pow-miner.cpp)
|
||||
target_link_libraries(pow-miner PRIVATE ton_crypto ton_block pow-miner-lib git)
|
||||
target_link_libraries(pow-miner PRIVATE ton_crypto pow-miner-lib git)
|
||||
|
||||
if (WINGETOPT_FOUND)
|
||||
target_link_libraries_system(fift wingetopt)
|
||||
target_link_libraries_system(pow-miner wingetopt)
|
||||
endif()
|
||||
|
||||
add_executable(mintless-proof-generator util/mintless-proof-generator.cpp)
|
||||
target_link_libraries(mintless-proof-generator PRIVATE ton_crypto git ${JEMALLOC_LIBRARIES})
|
||||
|
||||
if (JEMALLOC_FOUND)
|
||||
target_include_directories(mintless-proof-generator PRIVATE ${JEMALLOC_INCLUDE_DIR})
|
||||
target_compile_definitions(mintless-proof-generator PRIVATE -DTON_USE_JEMALLOC=1)
|
||||
endif()
|
||||
|
||||
set(TURN_OFF_LSAN cd .)
|
||||
if (TON_USE_ASAN AND NOT WIN32)
|
||||
set(TURN_OFF_LSAN export LSAN_OPTIONS=detect_leaks=0)
|
||||
|
@ -531,12 +540,12 @@ add_executable(create-state block/create-state.cpp)
|
|||
target_include_directories(create-state PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
if (INTERNAL_COMPILE)
|
||||
target_link_libraries(create-state PUBLIC ton_crypto fift-lib ton_block tonlib git)
|
||||
target_link_libraries(create-state PUBLIC ton_crypto fift-lib tonlib git)
|
||||
else()
|
||||
if (TONLIB_COMPILE)
|
||||
target_link_libraries(create-state PUBLIC ton_crypto fift-lib ton_block tonlib git)
|
||||
target_link_libraries(create-state PUBLIC ton_crypto fift-lib tonlib git)
|
||||
else()
|
||||
target_link_libraries(create-state PUBLIC ton_crypto fift-lib ton_block git)
|
||||
target_link_libraries(create-state PUBLIC ton_crypto fift-lib git)
|
||||
endif()
|
||||
endif()
|
||||
if (WINGETOPT_FOUND)
|
||||
|
@ -546,7 +555,7 @@ endif()
|
|||
add_executable(dump-block block/dump-block.cpp)
|
||||
target_include_directories(dump-block PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(dump-block PUBLIC ton_crypto fift-lib ton_block git)
|
||||
target_link_libraries(dump-block PUBLIC ton_crypto fift-lib git)
|
||||
if (WINGETOPT_FOUND)
|
||||
target_link_libraries_system(dump-block wingetopt)
|
||||
endif()
|
||||
|
@ -554,7 +563,7 @@ endif()
|
|||
add_executable(adjust-block block/adjust-block.cpp)
|
||||
target_include_directories(adjust-block PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(adjust-block PUBLIC ton_crypto fift-lib ton_block git)
|
||||
target_link_libraries(adjust-block PUBLIC ton_crypto fift-lib git)
|
||||
if (WINGETOPT_FOUND)
|
||||
target_link_libraries_system(dump-block wingetopt)
|
||||
target_link_libraries_system(adjust-block wingetopt)
|
||||
|
@ -563,7 +572,7 @@ endif()
|
|||
add_executable(test-weight-distr block/test-weight-distr.cpp)
|
||||
target_include_directories(test-weight-distr PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>
|
||||
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>)
|
||||
target_link_libraries(test-weight-distr PUBLIC ton_crypto fift-lib ton_block git)
|
||||
target_link_libraries(test-weight-distr PUBLIC ton_crypto fift-lib git)
|
||||
if (WINGETOPT_FOUND)
|
||||
target_link_libraries_system(test-weight-distr wingetopt)
|
||||
endif()
|
||||
|
|
|
@@ -719,8 +719,8 @@ td::uint64 BlockLimitStatus::estimate_block_size(const vm::NewCellStorageStat::S
  if (extra) {
    sum += *extra;
  }
  return 2000 + (sum.bits >> 3) + sum.cells * 12 + sum.internal_refs * 3 + sum.external_refs * 40 + accounts * 200 +
         transactions * 200 + (extra ? 200 : 0) + extra_out_msgs * 300 + public_library_diff * 700;
  return 2000 + (sum.bits >> 3) + sum.cells * 12 + sum.internal_refs * 3 + sum.external_refs * 40 + transactions * 200 +
         (extra ? 200 : 0) + extra_out_msgs * 300 + public_library_diff * 700;
}

int BlockLimitStatus::classify() const {
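The old and new return statements differ only in the accounts * 200 term, which is dropped. For reference, a self-contained restatement of the new estimate with a worked example (the plain integer types and the example numbers are illustrative, not from the sources):

    #include <cstdint>

    // Restatement of the new estimate; all weights are taken from the line above.
    uint64_t estimate_block_size(uint64_t bits, uint64_t cells, uint64_t internal_refs,
                                 uint64_t external_refs, uint64_t transactions, bool extra,
                                 uint64_t extra_out_msgs, uint64_t public_library_diff) {
      return 2000 + (bits >> 3) + cells * 12 + internal_refs * 3 + external_refs * 40 +
             transactions * 200 + (extra ? 200 : 0) + extra_out_msgs * 300 + public_library_diff * 700;
    }

    // Worked example: bits=80000, cells=500, internal_refs=1200, external_refs=10,
    // transactions=50, extra present, extra_out_msgs=2, public_library_diff=0
    // -> 2000 + 10000 + 6000 + 3600 + 400 + 10000 + 200 + 600 + 0 = 32800.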
|
|
@@ -239,6 +239,12 @@ struct ParamLimits {
  bool deserialize(vm::CellSlice& cs);
  int classify(td::uint64 value) const;
  bool fits(unsigned cls, td::uint64 value) const;
  void multiply_by(double x) {
    CHECK(x > 0.0);
    for (td::uint32& y : limits_) {
      y = (td::uint32)std::min<double>(y * x, 1e9);
    }
  }

 private:
  std::array<td::uint32, limits_cnt> limits_;
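multiply_by scales every stored threshold by a positive factor and saturates at 1e9, which keeps the scaled value representable in td::uint32. A standalone sketch with a worked example (the four-element array is an assumption for illustration):

    #include <algorithm>
    #include <array>
    #include <cstdint>

    // Saturating scale, mirroring ParamLimits::multiply_by above.
    void multiply_by(std::array<uint32_t, 4>& limits, double x) {
      for (uint32_t& y : limits) {
        y = (uint32_t)std::min<double>(y * x, 1e9);  // cap at 1e9 so the double fits back into uint32
      }
    }

    // Example: {1'000'000, 4'000'000, 8'000'000, 10'000'000} scaled by 200 becomes
    // {200'000'000, 800'000'000, 1'000'000'000, 1'000'000'000} - the last two saturate.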
|
|
@@ -666,15 +666,15 @@ wc_split_merge_timings#0
//workchain#a5 enabled_since:uint32 min_split:(## 8) max_split:(## 8)
//  { min_split <= max_split } { max_split <= 60 }

workchain#a6 enabled_since:uint32 actual_min_split:(## 8)
  min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split }
workchain#a6 enabled_since:uint32 monitor_min_split:(## 8)
  min_split:(## 8) max_split:(## 8) { monitor_min_split <= min_split }
  basic:(## 1) active:Bool accept_msgs:Bool flags:(## 13) { flags = 0 }
  zerostate_root_hash:bits256 zerostate_file_hash:bits256
  version:uint32 format:(WorkchainFormat basic)
  = WorkchainDescr;

workchain_v2#a7 enabled_since:uint32 actual_min_split:(## 8)
  min_split:(## 8) max_split:(## 8) { actual_min_split <= min_split }
workchain_v2#a7 enabled_since:uint32 monitor_min_split:(## 8)
  min_split:(## 8) max_split:(## 8) { monitor_min_split <= min_split }
  basic:(## 1) active:Bool accept_msgs:Bool flags:(## 13) { flags = 0 }
  zerostate_root_hash:bits256 zerostate_file_hash:bits256
  version:uint32 format:(WorkchainFormat basic)
|
|
@ -426,7 +426,7 @@ bool store_validator_list_hash(vm::CellBuilder& cb) {
|
|||
LOG_CHECK(vset) << "unpacked validator set is empty";
|
||||
auto ccvc = block::Config::unpack_catchain_validators_config(config_dict.lookup_ref(td::BitArray<32>{28}));
|
||||
ton::ShardIdFull shard{ton::masterchainId};
|
||||
auto nodes = block::Config::do_compute_validator_set(ccvc, shard, *vset, now, 0);
|
||||
auto nodes = block::Config::do_compute_validator_set(ccvc, shard, *vset, 0);
|
||||
LOG_CHECK(!nodes.empty()) << "validator node list in unpacked validator set is empty";
|
||||
auto vset_hash = block::compute_validator_set_hash(0, shard, std::move(nodes));
|
||||
LOG(DEBUG) << "initial validator set hash is " << vset_hash;
|
||||
|
|
|
@ -320,7 +320,7 @@ ton::ValidatorSessionConfig Config::get_consensus_config() const {
|
|||
c.max_block_size = r.max_block_bytes;
|
||||
c.max_collated_data_size = r.max_collated_bytes;
|
||||
};
|
||||
auto set_v2 = [&] (auto& r) {
|
||||
auto set_v2 = [&](auto& r) {
|
||||
set_v1(r);
|
||||
c.new_catchain_ids = r.new_catchain_ids;
|
||||
};
|
||||
|
@ -1746,7 +1746,7 @@ ton::CatchainSeqno ConfigInfo::get_shard_cc_seqno(ton::ShardIdFull shard) const
|
|||
|
||||
std::vector<ton::ValidatorDescr> Config::compute_validator_set(ton::ShardIdFull shard, const block::ValidatorSet& vset,
|
||||
ton::UnixTime time, ton::CatchainSeqno cc_seqno) const {
|
||||
return do_compute_validator_set(get_catchain_validators_config(), shard, vset, time, cc_seqno);
|
||||
return do_compute_validator_set(get_catchain_validators_config(), shard, vset, cc_seqno);
|
||||
}
|
||||
|
||||
std::vector<ton::ValidatorDescr> Config::compute_validator_set(ton::ShardIdFull shard, ton::UnixTime time,
|
||||
|
@ -1773,7 +1773,7 @@ std::vector<ton::ValidatorDescr> ConfigInfo::compute_validator_set_cc(ton::Shard
|
|||
if (cc_seqno_delta) {
|
||||
cc_seqno = *cc_seqno_delta += cc_seqno;
|
||||
}
|
||||
return do_compute_validator_set(get_catchain_validators_config(), shard, vset, time, cc_seqno);
|
||||
return do_compute_validator_set(get_catchain_validators_config(), shard, vset, cc_seqno);
|
||||
}
|
||||
|
||||
std::vector<ton::ValidatorDescr> ConfigInfo::compute_validator_set_cc(ton::ShardIdFull shard, ton::UnixTime time,
|
||||
|
@@ -1856,9 +1856,8 @@ int ValidatorSet::lookup_public_key(td::ConstBitPtr pubkey) const {
  return -1;
}

std::vector<ton::ValidatorDescr> Config::do_compute_validator_set(const block::CatchainValidatorsConfig& ccv_conf,
                                                                  ton::ShardIdFull shard,
                                                                  const block::ValidatorSet& vset, ton::UnixTime time,
std::vector<ton::ValidatorDescr> Config::do_compute_validator_set(const CatchainValidatorsConfig& ccv_conf,
                                                                  ton::ShardIdFull shard, const ValidatorSet& vset,
                                                                  ton::CatchainSeqno cc_seqno) {
  // LOG(DEBUG) << "in Config::do_compute_validator_set() for " << shard.to_str() << " ; cc_seqno=" << cc_seqno;
  std::vector<ton::ValidatorDescr> nodes;
@ -2075,7 +2074,7 @@ bool WorkchainInfo::unpack(ton::WorkchainId wc, vm::CellSlice& cs) {
|
|||
}
|
||||
auto unpack_v1 = [this](auto& info) {
|
||||
enabled_since = info.enabled_since;
|
||||
actual_min_split = info.actual_min_split;
|
||||
monitor_min_split = info.monitor_min_split;
|
||||
min_split = info.min_split;
|
||||
max_split = info.max_split;
|
||||
basic = info.basic;
|
||||
|
@ -2299,17 +2298,14 @@ td::Result<Ref<vm::Tuple>> ConfigInfo::get_prev_blocks_info() const {
|
|||
if (shard->sgn() < 0) {
|
||||
shard &= ((td::make_refint(1) << 64) - 1);
|
||||
}
|
||||
return vm::make_tuple_ref(
|
||||
td::make_refint(block_id.id.workchain),
|
||||
std::move(shard),
|
||||
td::make_refint(block_id.id.seqno),
|
||||
td::bits_to_refint(block_id.root_hash.bits(), 256),
|
||||
return vm::make_tuple_ref(td::make_refint(block_id.id.workchain), std::move(shard),
|
||||
td::make_refint(block_id.id.seqno), td::bits_to_refint(block_id.root_hash.bits(), 256),
|
||||
td::bits_to_refint(block_id.file_hash.bits(), 256));
|
||||
};
|
||||
std::vector<vm::StackEntry> last_mc_blocks;
|
||||
|
||||
last_mc_blocks.push_back(block_id_to_tuple(block_id));
|
||||
for (ton::BlockSeqno seqno = block_id.id.seqno; seqno > 0 && last_mc_blocks.size() < 16; ) {
|
||||
for (ton::BlockSeqno seqno = block_id.id.seqno; seqno > 0 && last_mc_blocks.size() < 16;) {
|
||||
--seqno;
|
||||
ton::BlockIdExt block_id;
|
||||
if (!get_old_mc_block_id(seqno, block_id)) {
|
||||
|
@ -2323,8 +2319,7 @@ td::Result<Ref<vm::Tuple>> ConfigInfo::get_prev_blocks_info() const {
|
|||
if (!get_last_key_block(last_key_block, last_key_block_lt)) {
|
||||
return td::Status::Error("cannot fetch last key block");
|
||||
}
|
||||
return vm::make_tuple_ref(
|
||||
td::make_cnt_ref<std::vector<vm::StackEntry>>(std::move(last_mc_blocks)),
|
||||
return vm::make_tuple_ref(td::make_cnt_ref<std::vector<vm::StackEntry>>(std::move(last_mc_blocks)),
|
||||
block_id_to_tuple(last_key_block));
|
||||
}
|
||||
|
||||
|
|
|
@@ -197,6 +197,7 @@ struct McShardHash : public McShardHashI {
: blk_(blk), start_lt_(start_lt), end_lt_(end_lt) {
}
McShardHash(const McShardHash&) = default;
McShardHash& operator=(const McShardHash&) = default;
bool is_valid() const {
return blk_.is_valid();
}
@@ -414,7 +415,7 @@ struct CatchainValidatorsConfig {
struct WorkchainInfo : public td::CntObject {
ton::WorkchainId workchain{ton::workchainInvalid};
ton::UnixTime enabled_since;
td::uint32 actual_min_split;
td::uint32 monitor_min_split;
td::uint32 min_split, max_split;
bool basic;
bool active;
@@ -455,10 +456,11 @@ class ShardConfig {
ShardConfig() = default;
ShardConfig(const ShardConfig& other);
ShardConfig(ShardConfig&& other) = default;
ShardConfig(Ref<vm::Cell> shard_hashes, Ref<McShardHash> mc_shard_hash = {})
explicit ShardConfig(Ref<vm::Cell> shard_hashes, Ref<McShardHash> mc_shard_hash = {})
: shard_hashes_(std::move(shard_hashes)), mc_shard_hash_(std::move(mc_shard_hash)) {
init();
}
ShardConfig& operator=(ShardConfig&& other) = default;
bool is_valid() const {
return valid_;
}
@@ -545,7 +547,10 @@ class Config {
};
public:
enum { needValidatorSet = 16, needSpecialSmc = 32, needWorkchainInfo = 256, needCapabilities = 512 };
static constexpr int needValidatorSet = 16;
static constexpr int needSpecialSmc = 32;
static constexpr int needWorkchainInfo = 256;
static constexpr int needCapabilities = 512;
int mode{0};
ton::BlockIdExt block_id;
@@ -655,9 +660,8 @@ class Config {
BurningConfig get_burning_config() const;
td::Ref<vm::Tuple> get_unpacked_config_tuple(ton::UnixTime now) const;
PrecompiledContractsConfig get_precompiled_contracts_config() const;
static std::vector<ton::ValidatorDescr> do_compute_validator_set(const block::CatchainValidatorsConfig& ccv_conf,
ton::ShardIdFull shard,
const block::ValidatorSet& vset, ton::UnixTime time,
static std::vector<ton::ValidatorDescr> do_compute_validator_set(const CatchainValidatorsConfig& ccv_conf,
ton::ShardIdFull shard, const ValidatorSet& vset,
ton::CatchainSeqno cc_seqno);
static td::Result<std::unique_ptr<Config>> unpack_config(Ref<vm::Cell> config_root,
@@ -682,14 +686,12 @@ class Config {
class ConfigInfo : public Config, public ShardConfig {
public:
enum {
needStateRoot = 1,
needLibraries = 2,
needStateExtraRoot = 4,
needShardHashes = 8,
needAccountsRoot = 64,
needPrevBlocks = 128
};
static constexpr int needStateRoot = 1;
static constexpr int needLibraries = 2;
static constexpr int needStateExtraRoot = 4;
static constexpr int needShardHashes = 8;
static constexpr int needAccountsRoot = 64;
static constexpr int needPrevBlocks = 128;
ton::BlockSeqno vert_seqno{~0U};
int global_id_{0};
ton::UnixTime utime{0};
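The hunks above replace the anonymous `enum` flag lists in `Config` and `ConfigInfo` with `static constexpr int` constants carrying the same values, so the request-mode masks keep an ordinary integer type. A minimal sketch of the flag-mask pattern; the struct below is illustrative only, with a few flag names and values copied from the header above:

```cpp
#include <iostream>

struct ConfigFlags {
  // typed constants instead of an unnamed enum
  static constexpr int needValidatorSet = 16;
  static constexpr int needSpecialSmc = 32;
  static constexpr int needWorkchainInfo = 256;
};

int main() {
  int mode = ConfigFlags::needValidatorSet | ConfigFlags::needWorkchainInfo;
  std::cout << ((mode & ConfigFlags::needValidatorSet) ? "validator set requested\n" : "not requested\n");
}
```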
@ -1555,7 +1555,14 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) {
|
|||
// ...
|
||||
compute_phase = std::make_unique<ComputePhase>();
|
||||
ComputePhase& cp = *(compute_phase.get());
|
||||
if (cfg.global_version >= 9) {
|
||||
original_balance = balance;
|
||||
if (msg_balance_remaining.is_valid()) {
|
||||
original_balance -= msg_balance_remaining;
|
||||
}
|
||||
} else {
|
||||
original_balance -= total_fees;
|
||||
}
|
||||
if (td::sgn(balance.grams) <= 0) {
|
||||
// no gas
|
||||
cp.skip_reason = ComputePhase::sk_no_gas;
|
||||
|
@ -2860,6 +2867,8 @@ td::Status Transaction::check_state_limits(const SizeLimitsConfig& size_limits,
|
|||
vm::CellStorageStat storage_stat;
|
||||
storage_stat.limit_cells = size_limits.max_acc_state_cells;
|
||||
storage_stat.limit_bits = size_limits.max_acc_state_bits;
|
||||
{
|
||||
TD_PERF_COUNTER(transaction_storage_stat_a);
|
||||
td::Timer timer;
|
||||
auto add_used_storage = [&](const td::Ref<vm::Cell>& cell) -> td::Status {
|
||||
if (cell.not_null()) {
|
||||
|
@ -2876,6 +2885,8 @@ td::Status Transaction::check_state_limits(const SizeLimitsConfig& size_limits,
|
|||
if (timer.elapsed() > 0.1) {
|
||||
LOG(INFO) << "Compute used storage took " << timer.elapsed() << "s";
|
||||
}
|
||||
}
|
||||
|
||||
if (acc_status == Account::acc_active) {
|
||||
storage_stat.clear_limit();
|
||||
} else {
|
||||
|
@ -3156,6 +3167,7 @@ bool Transaction::compute_state() {
|
|||
if (new_stats) {
|
||||
stats = new_stats.unwrap();
|
||||
} else {
|
||||
TD_PERF_COUNTER(transaction_storage_stat_b);
|
||||
td::Timer timer;
|
||||
stats.add_used_storage(Ref<vm::Cell>(storage)).ensure();
|
||||
if (timer.elapsed() > 0.1) {
|
||||
|
|
|
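In the compute-phase hunk above, nodes running global version 9 or later re-derive `original_balance` from the account balance after the inbound message has been credited, minus whatever value is still attached to that message, instead of subtracting the accumulated `total_fees`. A toy illustration with plain integers standing in for `CurrencyCollection`; the numbers are made up:

```cpp
#include <cstdint>
#include <iostream>

int main() {
  std::int64_t balance = 1'000'000;             // balance after crediting the inbound message
  std::int64_t msg_balance_remaining = 40'000;  // value still attached to the message

  // global_version >= 9 rule sketched above: strip the remaining message value
  std::int64_t original_balance = balance;
  original_balance -= msg_balance_remaining;
  std::cout << "original balance under the v9 rule: " << original_balance << "\n";
}
```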
@ -2294,11 +2294,11 @@ std::string AnyIntView<Tr>::to_dec_string_destroy_any() {
|
|||
stack.push_back(divmod_short_any(Tr::max_pow10));
|
||||
} while (sgn());
|
||||
char slice[word_bits * 97879 / 325147 + 2];
|
||||
std::sprintf(slice, "%lld", stack.back());
|
||||
std::snprintf(slice, sizeof(slice), "%lld", stack.back());
|
||||
s += slice;
|
||||
stack.pop_back();
|
||||
while (stack.size()) {
|
||||
std::sprintf(slice, "%018lld", stack.back());
|
||||
std::snprintf(slice, sizeof(slice), "%018lld", stack.back());
|
||||
s += slice;
|
||||
stack.pop_back();
|
||||
}
|
||||
|
|
|
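The `sprintf` calls in the decimal-conversion code above are switched to `snprintf` bounded by `sizeof(slice)`; the buffer size `word_bits * 97879 / 325147 + 2` appears to approximate `word_bits * log10(2)` plus room for a sign and the terminating NUL. A minimal standalone illustration of the bounded call and its truncation signal:

```cpp
#include <cstdio>

int main() {
  char buf[8];
  // snprintf never writes past sizeof(buf) and always NUL-terminates; the return
  // value is the length that would have been produced without the limit.
  int need = std::snprintf(buf, sizeof(buf), "%018lld", 1234567890LL);
  if (need >= static_cast<int>(sizeof(buf))) {
    std::puts("output was truncated");
  }
  return 0;
}
```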
@ -29,6 +29,7 @@ Ref<CntObject> CntObject::clone() const {
|
|||
namespace detail {
|
||||
struct SafeDeleter {
|
||||
public:
|
||||
thread_local static td::int64 delete_count;
|
||||
void retire(const CntObject *ptr) {
|
||||
if (is_active_) {
|
||||
to_delete_.push_back(ptr);
|
||||
|
@ -39,9 +40,11 @@ struct SafeDeleter {
|
|||
is_active_ = false;
|
||||
};
|
||||
delete ptr;
|
||||
delete_count++;
|
||||
while (!to_delete_.empty()) {
|
||||
auto *ptr = to_delete_.back();
|
||||
to_delete_.pop_back();
|
||||
delete_count++;
|
||||
delete ptr;
|
||||
}
|
||||
}
|
||||
|
@ -50,6 +53,7 @@ struct SafeDeleter {
|
|||
std::vector<const CntObject *> to_delete_;
|
||||
bool is_active_{false};
|
||||
};
|
||||
thread_local td::int64 SafeDeleter::delete_count{0};
|
||||
|
||||
TD_THREAD_LOCAL SafeDeleter *deleter;
|
||||
void safe_delete(const CntObject *ptr) {
|
||||
|
@ -57,4 +61,7 @@ void safe_delete(const CntObject *ptr) {
|
|||
deleter->retire(ptr);
|
||||
}
|
||||
} // namespace detail
|
||||
int64 ref_get_delete_count() {
|
||||
return detail::SafeDeleter::delete_count;
|
||||
}
|
||||
} // namespace td
|
||||
|
|
|
@ -472,5 +472,6 @@ template <class T>
|
|||
void swap(Ref<T>& r1, Ref<T>& r2) {
|
||||
r1.swap(r2);
|
||||
}
|
||||
int64 ref_get_delete_count();
|
||||
|
||||
} // namespace td
|
||||
|
|
|
@ -3425,7 +3425,7 @@ void import_cmdline_args(Dictionary& d, std::string arg0, int n, const char* con
|
|||
cmdline_args->set(std::move(list));
|
||||
for (int i = 1; i <= n; i++) {
|
||||
char buffer[14];
|
||||
sprintf(buffer, "$%d ", i);
|
||||
snprintf(buffer, sizeof(buffer), "$%d ", i);
|
||||
d.def_stack_word(buffer, std::bind(interpret_get_fixed_cmdline_arg, _1, i));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -81,7 +81,7 @@ bool CodeBlob::compute_used_code_vars(std::unique_ptr<Op>& ops_ptr, const VarDes
|
|||
func_assert(ops_ptr->cl == Op::_Nop);
|
||||
return ops_ptr->set_var_info(var_info);
|
||||
}
|
||||
return compute_used_code_vars(ops_ptr->next, var_info, edit) | ops_ptr->compute_used_vars(*this, edit);
|
||||
return int(compute_used_code_vars(ops_ptr->next, var_info, edit)) | int(ops_ptr->compute_used_vars(*this, edit));
|
||||
}
|
||||
|
||||
bool operator==(const VarDescrList& x, const VarDescrList& y) {
|
||||
|
@ -584,7 +584,7 @@ bool prune_unreachable(std::unique_ptr<Op>& ops) {
|
|||
ops = std::move(op.block1);
|
||||
return prune_unreachable(ops);
|
||||
} else {
|
||||
reach = prune_unreachable(op.block0) | prune_unreachable(op.block1);
|
||||
reach = int(prune_unreachable(op.block0)) | int(prune_unreachable(op.block1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
@ -660,7 +660,7 @@ bool prune_unreachable(std::unique_ptr<Op>& ops) {
|
|||
break;
|
||||
}
|
||||
case Op::_TryCatch: {
|
||||
reach = prune_unreachable(op.block0) | prune_unreachable(op.block1);
|
||||
reach = int(prune_unreachable(op.block0)) | int(prune_unreachable(op.block1));
|
||||
break;
|
||||
}
|
||||
default:
|
||||
|
@ -892,15 +892,15 @@ bool Op::mark_noreturn() {
|
|||
return set_noreturn(true);
|
||||
case _If:
|
||||
case _TryCatch:
|
||||
return set_noreturn((block0->mark_noreturn() & (block1 && block1->mark_noreturn())) | next->mark_noreturn());
|
||||
return set_noreturn((int(block0->mark_noreturn()) & int(block1 && block1->mark_noreturn())) | int(next->mark_noreturn()));
|
||||
case _Again:
|
||||
block0->mark_noreturn();
|
||||
return set_noreturn(true);
|
||||
case _Until:
|
||||
return set_noreturn(block0->mark_noreturn() | next->mark_noreturn());
|
||||
return set_noreturn(int(block0->mark_noreturn()) | int(next->mark_noreturn()));
|
||||
case _While:
|
||||
block1->mark_noreturn();
|
||||
return set_noreturn(block0->mark_noreturn() | next->mark_noreturn());
|
||||
return set_noreturn(int(block0->mark_noreturn()) | int(next->mark_noreturn()));
|
||||
case _Repeat:
|
||||
block0->mark_noreturn();
|
||||
return set_noreturn(next->mark_noreturn());
|
||||
|
|
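The FunC compiler hunks above keep the non-short-circuiting `|` (both recursive calls must run for their side effects) but wrap the `bool` operands in `int(...)`, which silences compiler warnings about applying bitwise operators to booleans. A small standalone illustration of the difference:

```cpp
#include <iostream>

static bool step_a() { std::cout << "a "; return true; }
static bool step_b() { std::cout << "b "; return true; }

int main() {
  // With '||' the right-hand call would be skipped once step_a() returns true.
  // Casting to int and using '|' keeps both side effects and avoids
  // "bitwise operator on boolean operands" warnings.
  bool changed = (int(step_a()) | int(step_b())) != 0;
  std::cout << "-> " << std::boolalpha << changed << "\n";
}
```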
crypto/func/auto-tests/tests/bit-operators.fc (new file, 53 lines)
@@ -0,0 +1,53 @@
|
|||
|
||||
int lshift() {
|
||||
return (1 << 0) == 1;
|
||||
}
|
||||
|
||||
int rshift() {
|
||||
return (1 >> 0) == 1;
|
||||
}
|
||||
|
||||
int lshift_var(int i) {
|
||||
return (1 << i) == 1;
|
||||
}
|
||||
|
||||
int rshift_var(int i) {
|
||||
return (1 >> i) == 1;
|
||||
}
|
||||
|
||||
int main(int x) {
|
||||
if (x == 0) {
|
||||
return lshift();
|
||||
} elseif (x == 1) {
|
||||
return rshift();
|
||||
} elseif (x == 2) {
|
||||
return lshift_var(0);
|
||||
} elseif (x == 3) {
|
||||
return rshift_var(0);
|
||||
} elseif (x == 4) {
|
||||
return lshift_var(1);
|
||||
} else {
|
||||
return rshift_var(1);
|
||||
}
|
||||
}
|
||||
|
||||
int is_claimed(int index) method_id(11) {
|
||||
int claim_bit_index = index % 256;
|
||||
int mask = 1 << claim_bit_index;
|
||||
return (255 & mask) == mask;
|
||||
}
|
||||
|
||||
|
||||
{-
|
||||
method_id | in | out
|
||||
TESTCASE | 0 | 0 | -1
|
||||
TESTCASE | 0 | 1 | -1
|
||||
TESTCASE | 0 | 2 | -1
|
||||
TESTCASE | 0 | 3 | -1
|
||||
TESTCASE | 0 | 4 | 0
|
||||
TESTCASE | 0 | 5 | 0
|
||||
TESTCASE | 11 | 0 | -1
|
||||
TESTCASE | 11 | 1 | -1
|
||||
TESTCASE | 11 | 256 | -1
|
||||
TESTCASE | 11 | 8 | 0
|
||||
-}
|
|
@ -264,7 +264,7 @@ int emulate_lshift(int a, int b) {
|
|||
}
|
||||
int t = ((b & VarDescr::_NonZero) ? VarDescr::_Even : 0);
|
||||
t |= b & VarDescr::_Finite;
|
||||
return emulate_mul(a, VarDescr::_Int | VarDescr::_Pos | VarDescr::_NonZero | VarDescr::_Even | t);
|
||||
return emulate_mul(a, VarDescr::_Int | VarDescr::_Pos | VarDescr::_NonZero | t);
|
||||
}
|
||||
|
||||
int emulate_div(int a, int b) {
|
||||
|
@ -310,7 +310,7 @@ int emulate_rshift(int a, int b) {
|
|||
}
|
||||
int t = ((b & VarDescr::_NonZero) ? VarDescr::_Even : 0);
|
||||
t |= b & VarDescr::_Finite;
|
||||
return emulate_div(a, VarDescr::_Int | VarDescr::_Pos | VarDescr::_NonZero | VarDescr::_Even | t);
|
||||
return emulate_div(a, VarDescr::_Int | VarDescr::_Pos | VarDescr::_NonZero | t);
|
||||
}
|
||||
|
||||
int emulate_mod(int a, int b, int round_mode = -1) {
|
||||
|
|
|
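Dropping `VarDescr::_Even` from the unconditional flag set in `emulate_lshift` and `emulate_rshift` looks like a constant-propagation fix: a shift only guarantees an even result when the shift amount is known to be non-zero, and that case is already covered by the conditional `t` flag computed just above. A shift by zero leaves the operand unchanged:

```cpp
#include <cassert>

int main() {
  // 1 << 0 is still 1 (odd), so the analyzer must not assume the result is even
  // unless the shift count is known to be non-zero.
  assert((1 << 0) == 1);
  assert((6 >> 0) == 6);  // even here only because the input happened to be even
  return 0;
}
```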
@ -45,7 +45,7 @@ extern std::string generated_from;
|
|||
|
||||
constexpr int optimize_depth = 20;
|
||||
|
||||
const std::string func_version{"0.4.4"};
|
||||
const std::string func_version{"0.4.5"};
|
||||
|
||||
enum Keyword {
|
||||
_Eof = -1,
|
||||
|
|
|
@ -48,6 +48,7 @@ struct OpensslEVP_SHA512 {
|
|||
|
||||
template <typename H>
|
||||
class HashCtx {
|
||||
EVP_MD_CTX *base_ctx{nullptr};
|
||||
EVP_MD_CTX *ctx{nullptr};
|
||||
void init();
|
||||
void clear();
|
||||
|
@ -77,16 +78,20 @@ class HashCtx {
|
|||
template <typename H>
|
||||
void HashCtx<H>::init() {
|
||||
ctx = EVP_MD_CTX_create();
|
||||
base_ctx = EVP_MD_CTX_create();
|
||||
EVP_DigestInit_ex(base_ctx, H::get_evp(), 0);
|
||||
reset();
|
||||
}
|
||||
|
||||
template <typename H>
|
||||
void HashCtx<H>::reset() {
|
||||
EVP_DigestInit_ex(ctx, H::get_evp(), 0);
|
||||
EVP_MD_CTX_copy_ex(ctx, base_ctx);
|
||||
}
|
||||
|
||||
template <typename H>
|
||||
void HashCtx<H>::clear() {
|
||||
EVP_MD_CTX_destroy(base_ctx);
|
||||
base_ctx = nullptr;
|
||||
EVP_MD_CTX_destroy(ctx);
|
||||
ctx = nullptr;
|
||||
}
|
||||
|
|
|
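`HashCtx::reset()` above now clones a pre-initialised `base_ctx` with `EVP_MD_CTX_copy_ex` instead of re-running `EVP_DigestInit_ex` on every reset, which presumably avoids re-deriving the per-algorithm state each time a cell is hashed. A standalone sketch of the same pattern against OpenSSL's EVP API, with error checking omitted:

```cpp
#include <openssl/evp.h>
#include <cstdio>

int main() {
  EVP_MD_CTX *base = EVP_MD_CTX_create();
  EVP_DigestInit_ex(base, EVP_sha256(), nullptr);  // pay the algorithm setup once

  EVP_MD_CTX *ctx = EVP_MD_CTX_create();
  unsigned char digest[EVP_MAX_MD_SIZE];
  unsigned int len = 0;
  for (int i = 0; i < 3; i++) {
    EVP_MD_CTX_copy_ex(ctx, base);  // cheap "reset" to the initialised state
    EVP_DigestUpdate(ctx, "abcd", 4);
    EVP_DigestFinal_ex(ctx, digest, &len);
  }
  std::printf("digest length: %u bytes\n", len);

  EVP_MD_CTX_destroy(ctx);
  EVP_MD_CTX_destroy(base);
}
```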
@ -13,7 +13,7 @@ variable extra-currencies
|
|||
{ extra-currencies @ cc+ extra-currencies ! } : extra-cc+!
|
||||
|
||||
begin-options
|
||||
" <filename-base> <dest-addr> <subwallet-id> <seqno> <amount> [-x <extra-amount>*<extra-currency-id>] [-n|-b] [-t<timeout>] [-B <body-boc>] [-C <comment>] [<savefile>]" +cr +tab
|
||||
" <filename-base> <dest-addr> <subwallet-id> <seqno> <amount> [-x <extra-amount>*<extra-currency-id>] [-n|-b] [-t<timeout>] [-B <body-boc>] [-C <comment>] [-I <init-boc>] [<savefile>]" +cr +tab
|
||||
+"Creates a request to advanced wallet created by new-wallet-v3.fif, with private key loaded from file <filename-base>.pk "
|
||||
+"and address from <filename-base>.addr, and saves it into <savefile>.boc ('wallet-query.boc' by default)"
|
||||
disable-digit-options generic-help-setopt
|
||||
|
@ -29,6 +29,8 @@ begin-options
|
|||
"Sets the payload of the transfer message" option-help
|
||||
"C" "--comment" { =: comment } short-long-option-arg
|
||||
"Sets the comment to be sent in the transfer message" option-help
|
||||
"I" "--with-init" { =: init-file } short-long-option-arg
|
||||
"Indicates filename with BoC containing StateInit for internal message" option-help
|
||||
"m" "--mode" { parse-int =: send-mode } short-long-option-arg
|
||||
"Sets transfer mode (0..255) for SENDRAWMSG (" send-mode (.) $+ +" by default)"
|
||||
option-help
|
||||
|
@ -57,14 +59,18 @@ file-base +".pk" load-keypair nip constant wallet_pk
|
|||
def? body-boc-file { @' body-boc-file file>B B>boc } { comment simple-transfer-body } cond
|
||||
constant body-cell
|
||||
|
||||
def? init-file { @' init-file file>B B>boc <s b{11} swap |_ } { b{0} } cond
|
||||
=: state-init
|
||||
|
||||
."Transferring " amount .GR+cc ."to account "
|
||||
dest_addr 2dup bounce 7 + .Addr ." = " .addr
|
||||
."subwallet_id=0x" subwallet_id x.
|
||||
."seqno=0x" seqno x. ."bounce=" bounce . cr
|
||||
."Body of transfer message is " body-cell <s csr. cr
|
||||
."StateInit is " state-init csr. cr
|
||||
|
||||
// create a message
|
||||
<b b{01} s, bounce 1 i, b{000} s, dest_addr Addr, amount Gram+cc, 0 9 64 32 + + u,
|
||||
<b b{01} s, bounce 1 i, b{000} s, dest_addr Addr, amount Gram+cc, 0 8 64 32 + + u, state-init s,
|
||||
body-cell <s 2dup 1 s-fits-with? not rot over 1 i, -rot { drop body-cell ref, } { s, } cond
|
||||
b>
|
||||
<b subwallet_id 32 u, now timeout + 32 u, seqno 32 u, send-mode 8 u, swap ref, b>
|
||||
|
|
|
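In the wallet-v3 request script above, the message header previously stored 9 + 64 + 32 hard-coded zero bits; it now stores 8 + 64 + 32 zero bits followed by the `state-init` slice, which defaults to the single bit `b{0}` when no `-I` option is given. The default encoding therefore stays bit-for-bit identical, and the freed bit appears to carry the optional StateInit marker. The arithmetic spelled out as a hypothetical standalone check, not part of the script:

```cpp
#include <iostream>

int main() {
  int old_bits = 9 + 64 + 32;      // zero bits written by the old header
  int new_bits = 8 + 64 + 32 + 1;  // zero bits plus the default b{0} state-init slice
  std::cout << old_bits << " == " << new_bits << " -> " << std::boolalpha
            << (old_bits == new_bits) << "\n";
}
```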
@ -61,7 +61,8 @@ block::StdAddress GenericAccount::get_address(ton::WorkchainId workchain_id,
|
|||
return block::StdAddress(workchain_id, init_state->get_hash().bits(), true /*bounce*/);
|
||||
}
|
||||
|
||||
void GenericAccount::store_int_message(vm::CellBuilder& cb, const block::StdAddress& dest_address, td::int64 gramms) {
|
||||
void GenericAccount::store_int_message(vm::CellBuilder& cb, const block::StdAddress& dest_address, td::int64 gramms,
|
||||
td::Ref<vm::Cell> extra_currencies) {
|
||||
td::BigInt256 dest_addr;
|
||||
dest_addr.import_bits(dest_address.addr.as_bitslice());
|
||||
cb.store_zeroes(1)
|
||||
|
@ -73,7 +74,8 @@ void GenericAccount::store_int_message(vm::CellBuilder& cb, const block::StdAddr
|
|||
.store_long(dest_address.workchain, 8)
|
||||
.store_int256(dest_addr, 256);
|
||||
block::tlb::t_Grams.store_integer_value(cb, td::BigInt256(gramms));
|
||||
cb.store_zeroes(9 + 64 + 32);
|
||||
cb.store_maybe_ref(extra_currencies);
|
||||
cb.store_zeroes(8 + 64 + 32);
|
||||
}
|
||||
|
||||
td::Ref<vm::Cell> GenericAccount::create_ext_message(const block::StdAddress& address, td::Ref<vm::Cell> new_state,
|
||||
|
|
|
@ -36,7 +36,8 @@ class GenericAccount {
|
|||
static block::StdAddress get_address(ton::WorkchainId workchain_id, const td::Ref<vm::Cell>& init_state) noexcept;
|
||||
static td::Ref<vm::Cell> create_ext_message(const block::StdAddress& address, td::Ref<vm::Cell> new_state,
|
||||
td::Ref<vm::Cell> body) noexcept;
|
||||
static void store_int_message(vm::CellBuilder& cb, const block::StdAddress& dest_address, td::int64 gramms);
|
||||
static void store_int_message(vm::CellBuilder& cb, const block::StdAddress& dest_address, td::int64 gramms,
|
||||
td::Ref<vm::Cell> extra_currencies);
|
||||
|
||||
static td::Result<td::Ed25519::PublicKey> get_public_key(const SmartContract& sc);
|
||||
static td::Result<td::uint32> get_seqno(const SmartContract& sc);
|
||||
|
|
|
@ -48,7 +48,7 @@ td::Result<td::Ref<vm::Cell>> WalletInterface::get_init_message(const td::Ed2551
|
|||
|
||||
td::Ref<vm::Cell> WalletInterface::create_int_message(const Gift &gift) {
|
||||
vm::CellBuilder cbi;
|
||||
GenericAccount::store_int_message(cbi, gift.destination, gift.gramms < 0 ? 0 : gift.gramms);
|
||||
GenericAccount::store_int_message(cbi, gift.destination, gift.gramms < 0 ? 0 : gift.gramms, gift.extra_currencies);
|
||||
if (gift.init_state.not_null()) {
|
||||
cbi.store_ones(2);
|
||||
cbi.store_ref(gift.init_state);
|
||||
|
|
|
@ -37,6 +37,7 @@ class WalletInterface : public SmartContract {
|
|||
struct Gift {
|
||||
block::StdAddress destination;
|
||||
td::int64 gramms;
|
||||
td::Ref<vm::Cell> extra_currencies;
|
||||
td::int32 send_mode{-1};
|
||||
|
||||
bool is_encrypted{false};
|
||||
|
|
|
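`GenericAccount::store_int_message` above makes the same trade: one of the former 9 + 64 + 32 zero bits is now written by `cb.store_maybe_ref(extra_currencies)`, which emits a single 0 bit for a null reference and a 1 bit plus a cell reference otherwise, so passing no extra currencies keeps the old layout. A toy model of that Maybe-reference encoding; the names below are illustrative, not the real CellBuilder API:

```cpp
#include <memory>
#include <string>
#include <vector>

struct ToyBuilder {
  std::string bits;
  std::vector<std::shared_ptr<int>> refs;
  void store_maybe_ref(std::shared_ptr<int> ref) {
    if (!ref) {
      bits += '0';  // absent: a single zero bit, matching the previous store_zeroes layout
    } else {
      bits += '1';  // present: marker bit plus the reference itself
      refs.push_back(std::move(ref));
    }
  }
};

int main() {
  ToyBuilder cb;
  cb.store_maybe_ref(nullptr);  // empty extra-currency dictionary
  return cb.bits == "0" ? 0 : 1;
}
```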
@ -17,6 +17,8 @@
|
|||
Copyright 2017-2020 Telegram Systems LLP
|
||||
*/
|
||||
#include "crypto/Ed25519.h"
|
||||
#include "ellcurve/Ed25519.h"
|
||||
|
||||
#include "td/utils/logging.h"
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/Slice.h"
|
||||
|
@ -24,6 +26,8 @@
|
|||
#include "td/utils/JsonBuilder.h"
|
||||
|
||||
#include "wycheproof.h"
|
||||
#include "keys/keys.hpp"
|
||||
#include "td/utils/benchmark.h"
|
||||
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
@ -217,3 +221,36 @@ TEST(Crypto, almost_zero) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
BENCH(ed25519_sign, "ed25519_sign") {
|
||||
auto private_key = td::Ed25519::generate_private_key().move_as_ok();
|
||||
std::string hash_to_sign(32, 'a');
|
||||
for (int i = 0; i < n; i++) {
|
||||
private_key.sign(hash_to_sign).ensure();
|
||||
}
|
||||
}
|
||||
|
||||
BENCH(ed25519_shared_secret, "ed25519_shared_secret") {
|
||||
auto private_key_a = td::Ed25519::generate_private_key().move_as_ok();
|
||||
auto private_key_b = td::Ed25519::generate_private_key().move_as_ok();
|
||||
auto public_key_b = private_key_a.get_public_key().move_as_ok();
|
||||
for (int i = 0; i < n; i++) {
|
||||
td::Ed25519::compute_shared_secret(public_key_b, private_key_a).ensure();
|
||||
}
|
||||
}
|
||||
|
||||
BENCH(ed25519_verify, "ed25519_verify") {
|
||||
auto private_key = td::Ed25519::generate_private_key().move_as_ok();
|
||||
std::string hash_to_sign(32, 'a');
|
||||
auto public_key = private_key.get_public_key().move_as_ok();
|
||||
auto signature = private_key.sign(hash_to_sign).move_as_ok();
|
||||
for (int i = 0; i < n; i++) {
|
||||
public_key.verify_signature(hash_to_sign, signature).ensure();
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Crypto, ed25519_benchmark) {
|
||||
bench(ed25519_signBench());
|
||||
bench(ed25519_shared_secretBench());
|
||||
bench(ed25519_verifyBench());
|
||||
}
|
|
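The new `BENCH` blocks above time Ed25519 signing, shared-secret derivation and verification using td's benchmark helpers from `td/utils/benchmark.h`. A rough standalone equivalent of what such a loop measures, without the framework's calibration and reporting:

```cpp
#include <chrono>
#include <cstdio>

// Hand-rolled ops/sec measurement over an arbitrary workload (the workload here
// is a dummy integer update, not a real Ed25519 operation).
template <class F>
void measure(const char *name, F &&f, int n = 100000) {
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < n; i++) {
    f();
  }
  std::chrono::duration<double> dt = std::chrono::steady_clock::now() - start;
  std::printf("%s: %.0f ops/sec\n", name, n / dt.count());
}

int main() {
  volatile unsigned x = 1;
  measure("dummy_workload", [&] { x = x * 2654435761u + 1; });
}
```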
@ -54,12 +54,88 @@
|
|||
|
||||
#include <set>
|
||||
#include <map>
|
||||
#include <thread>
|
||||
|
||||
#include <openssl/sha.h>
|
||||
|
||||
#include "openssl/digest.hpp"
|
||||
#include "vm/dict.h"
|
||||
|
||||
#include <condition_variable>
|
||||
#include <latch>
|
||||
#include <numeric>
|
||||
#include <optional>
|
||||
#include <queue>
|
||||
|
||||
namespace vm {
|
||||
class ThreadExecutor : public DynamicBagOfCellsDb::AsyncExecutor {
|
||||
public:
|
||||
explicit ThreadExecutor(size_t threads_n) {
|
||||
for (size_t i = 0; i < threads_n; ++i) {
|
||||
threads_.emplace_back([this]() {
|
||||
while (true) {
|
||||
auto task = pop_task();
|
||||
if (!task) {
|
||||
break;
|
||||
}
|
||||
CHECK(generation_.load() % 2 == 1);
|
||||
task();
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
~ThreadExecutor() override {
|
||||
for (size_t i = 0; i < threads_.size(); ++i) {
|
||||
push_task({});
|
||||
}
|
||||
for (auto &t : threads_) {
|
||||
t.join();
|
||||
}
|
||||
}
|
||||
|
||||
void execute_async(std::function<void()> f) override {
|
||||
push_task(std::move(f));
|
||||
}
|
||||
|
||||
void execute_sync(std::function<void()> f) override {
|
||||
auto x = generation_.load();
|
||||
std::scoped_lock lock(sync_mutex_);
|
||||
CHECK(x == generation_);
|
||||
CHECK(generation_.load() % 2 == 1);
|
||||
f();
|
||||
CHECK(generation_.load() % 2 == 1);
|
||||
}
|
||||
void inc_generation() {
|
||||
generation_.fetch_add(1);
|
||||
}
|
||||
|
||||
private:
|
||||
std::atomic<size_t> generation_{0};
|
||||
std::queue<std::pair<std::function<void()>, size_t>> queue_;
|
||||
std::mutex queue_mutex_;
|
||||
std::condition_variable cv_;
|
||||
std::mutex sync_mutex_;
|
||||
std::vector<td::thread> threads_;
|
||||
|
||||
std::function<void()> pop_task() {
|
||||
std::unique_lock lock(queue_mutex_);
|
||||
cv_.wait(lock, [&] { return !queue_.empty(); });
|
||||
CHECK(!queue_.empty());
|
||||
auto task = std::move(queue_.front());
|
||||
queue_.pop();
|
||||
CHECK(task.second == generation_);
|
||||
return task.first;
|
||||
}
|
||||
|
||||
void push_task(std::function<void()> task) {
|
||||
{
|
||||
std::scoped_lock lock(queue_mutex_);
|
||||
queue_.emplace(std::move(task), generation_.load());
|
||||
}
|
||||
cv_.notify_one();
|
||||
}
|
||||
};
|
||||
|
||||
std::vector<int> do_get_serialization_modes() {
|
||||
std::vector<int> res;
|
||||
|
@ -82,9 +158,23 @@ int get_random_serialization_mode(T &rnd) {
|
|||
return modes[rnd.fast(0, (int)modes.size() - 1)];
|
||||
}
|
||||
|
||||
class BenchSha256 : public td::Benchmark {
|
||||
class BenchSha : public td::Benchmark {
|
||||
public:
|
||||
explicit BenchSha(size_t n) : str_(n, 'a') {
|
||||
}
|
||||
std::string get_description() const override {
|
||||
return PSTRING() << get_name() << " length=" << str_.size();
|
||||
}
|
||||
|
||||
virtual std::string get_name() const = 0;
|
||||
|
||||
protected:
|
||||
std::string str_;
|
||||
};
|
||||
class BenchSha256 : public BenchSha {
|
||||
public:
|
||||
using BenchSha::BenchSha;
|
||||
std::string get_name() const override {
|
||||
return "SHA256";
|
||||
}
|
||||
|
||||
|
@ -92,7 +182,7 @@ class BenchSha256 : public td::Benchmark {
|
|||
int res = 0;
|
||||
for (int i = 0; i < n; i++) {
|
||||
digest::SHA256 hasher;
|
||||
hasher.feed("abcd", 4);
|
||||
hasher.feed(str_);
|
||||
unsigned char buf[32];
|
||||
hasher.extract(buf);
|
||||
res += buf[0];
|
||||
|
@ -100,10 +190,12 @@ class BenchSha256 : public td::Benchmark {
|
|||
td::do_not_optimize_away(res);
|
||||
}
|
||||
};
|
||||
class BenchSha256Reuse : public td::Benchmark {
|
||||
class BenchSha256Reuse : public BenchSha {
|
||||
public:
|
||||
std::string get_description() const override {
|
||||
return "SHA256 reuse";
|
||||
using BenchSha::BenchSha;
|
||||
|
||||
std::string get_name() const override {
|
||||
return "SHA256 reuse (used in DataCell)";
|
||||
}
|
||||
|
||||
void run(int n) override {
|
||||
|
@ -111,7 +203,7 @@ class BenchSha256Reuse : public td::Benchmark {
|
|||
digest::SHA256 hasher;
|
||||
for (int i = 0; i < n; i++) {
|
||||
hasher.reset();
|
||||
hasher.feed("abcd", 4);
|
||||
hasher.feed(str_);
|
||||
unsigned char buf[32];
|
||||
hasher.extract(buf);
|
||||
res += buf[0];
|
||||
|
@ -119,28 +211,46 @@ class BenchSha256Reuse : public td::Benchmark {
|
|||
td::do_not_optimize_away(res);
|
||||
}
|
||||
};
|
||||
class BenchSha256Low : public td::Benchmark {
|
||||
class BenchSha256Low : public BenchSha {
|
||||
public:
|
||||
std::string get_description() const override {
|
||||
using BenchSha::BenchSha;
|
||||
|
||||
std::string get_name() const override {
|
||||
return "SHA256 low level";
|
||||
}
|
||||
|
||||
// Use the old method to check for performance degradation
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
|
||||
#elif defined(_MSC_VER)
|
||||
#pragma warning(push)
|
||||
#pragma warning(disable : 4996) // Disable deprecated warning for MSVC
|
||||
#endif
|
||||
void run(int n) override {
|
||||
int res = 0;
|
||||
td::Sha256State ctx;
|
||||
SHA256_CTX ctx;
|
||||
for (int i = 0; i < n; i++) {
|
||||
ctx.init();
|
||||
ctx.feed("abcd");
|
||||
SHA256_Init(&ctx);
|
||||
SHA256_Update(&ctx, str_.data(), str_.size());
|
||||
unsigned char buf[32];
|
||||
ctx.extract(td::MutableSlice{buf, 32});
|
||||
SHA256_Final(buf, &ctx);
|
||||
res += buf[0];
|
||||
}
|
||||
td::do_not_optimize_away(res);
|
||||
}
|
||||
};
|
||||
class BenchSha256Tdlib : public td::Benchmark {
|
||||
#if defined(__GNUC__) || defined(__clang__)
|
||||
#pragma GCC diagnostic pop
|
||||
#elif defined(_MSC_VER)
|
||||
#pragma warning(pop)
|
||||
#endif
|
||||
|
||||
class BenchSha256Tdlib : public BenchSha {
|
||||
public:
|
||||
std::string get_description() const override {
|
||||
using BenchSha::BenchSha;
|
||||
|
||||
std::string get_name() const override {
|
||||
return "SHA256 TDLib";
|
||||
}
|
||||
|
||||
|
@ -150,7 +260,7 @@ class BenchSha256Tdlib : public td::Benchmark {
|
|||
for (int i = 0; i < n; i++) {
|
||||
td::init_thread_local<td::Sha256State>(ctx);
|
||||
ctx->init();
|
||||
ctx->feed("abcd");
|
||||
ctx->feed(str_);
|
||||
unsigned char buf[32];
|
||||
ctx->extract(td::MutableSlice(buf, 32), false);
|
||||
res += buf[0];
|
||||
|
@ -158,11 +268,61 @@ class BenchSha256Tdlib : public td::Benchmark {
|
|||
td::do_not_optimize_away(res);
|
||||
}
|
||||
};
|
||||
|
||||
template <class F>
|
||||
void bench_threaded(F &&f) {
|
||||
class Threaded : public td::Benchmark {
|
||||
public:
|
||||
explicit Threaded(F &&f) : f_(std::move(f)), base(f_()) {
|
||||
}
|
||||
F f_;
|
||||
std::decay_t<decltype(f_())> base;
|
||||
|
||||
std::string get_description() const override {
|
||||
return base.get_description() + " threaded";
|
||||
}
|
||||
|
||||
void run(int n) override {
|
||||
std::atomic<int> task_i{0};
|
||||
int chunk_size = 1024;
|
||||
int num_threads = 16;
|
||||
n *= num_threads;
|
||||
std::vector<td::thread> threads;
|
||||
for (int i = 0; i < num_threads; i++) {
|
||||
threads.emplace_back([&]() mutable {
|
||||
auto bench = f_();
|
||||
while (true) {
|
||||
i = task_i.fetch_add(chunk_size, std::memory_order_relaxed);
|
||||
auto i_end = std::min(n, i + chunk_size);
|
||||
if (i > n) {
|
||||
break;
|
||||
}
|
||||
bench.run(i_end - i);
|
||||
}
|
||||
});
|
||||
}
|
||||
for (auto &thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
};
|
||||
};
|
||||
bench(Threaded(std::forward<F>(f)));
|
||||
}
|
||||
TEST(Cell, sha_benchmark) {
|
||||
bench(BenchSha256Tdlib());
|
||||
bench(BenchSha256Low());
|
||||
bench(BenchSha256Reuse());
|
||||
bench(BenchSha256());
|
||||
for (size_t n : {4, 64, 128}) {
|
||||
bench(BenchSha256Tdlib(n));
|
||||
bench(BenchSha256Low(n));
|
||||
bench(BenchSha256Reuse(n));
|
||||
bench(BenchSha256(n));
|
||||
}
|
||||
}
|
||||
TEST(Cell, sha_benchmark_threaded) {
|
||||
for (size_t n : {4, 64, 128}) {
|
||||
bench_threaded([n] { return BenchSha256Tdlib(n); });
|
||||
bench_threaded([n]() { return BenchSha256Low(n); });
|
||||
bench_threaded([n]() { return BenchSha256Reuse(n); });
|
||||
bench_threaded([n]() { return BenchSha256(n); });
|
||||
}
|
||||
}
|
||||
|
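`bench_threaded` above spreads the iteration budget over 16 worker threads by handing out fixed-size chunks from a shared atomic counter, so each thread builds its own benchmark instance and pulls work until the budget is exhausted. A stripped-down version of that chunking loop:

```cpp
#include <algorithm>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  const int total = 1 << 20, chunk = 1024, num_threads = 4;
  std::atomic<int> next{0};
  std::atomic<long long> done{0};
  std::vector<std::thread> threads;
  for (int t = 0; t < num_threads; t++) {
    threads.emplace_back([&] {
      while (true) {
        int begin = next.fetch_add(chunk, std::memory_order_relaxed);
        if (begin >= total) {
          break;
        }
        int end = std::min(total, begin + chunk);
        done += end - begin;  // stand-in for running 'end - begin' hashing iterations
      }
    });
  }
  for (auto &th : threads) {
    th.join();
  }
  std::printf("executed %lld of %d iterations\n", static_cast<long long>(done), total);
}
```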
||||
std::string serialize_boc(Ref<Cell> cell, int mode = 31) {
|
||||
|
@ -762,16 +922,136 @@ TEST(TonDb, BocMultipleRoots) {
|
|||
}
|
||||
};
|
||||
|
||||
TEST(TonDb, DynamicBoc) {
|
||||
TEST(TonDb, InMemoryDynamicBocSimple) {
|
||||
auto counter = [] { return td::NamedThreadSafeCounter::get_default().get_counter("DataCell").sum(); };
|
||||
auto before = counter();
|
||||
SCOPE_EXIT {
|
||||
LOG_CHECK(before == counter()) << before << " vs " << counter();
};
|
||||
td::Random::Xorshift128plus rnd{123};
|
||||
auto kv = std::make_shared<td::MemoryKeyValue>();
|
||||
CellStorer storer(*kv);
|
||||
|
||||
auto boc = DynamicBagOfCellsDb::create_in_memory(kv.get(), {});
|
||||
|
||||
auto empty_cell = vm::CellBuilder().finalize();
|
||||
boc->inc(empty_cell);
|
||||
boc->prepare_commit().ensure();
|
||||
boc->commit(storer).ensure();
|
||||
auto got_empty_cell = boc->load_cell(empty_cell->get_hash().as_slice()).move_as_ok();
|
||||
ASSERT_EQ(empty_cell->get_hash(), got_empty_cell->get_hash());
|
||||
|
||||
boc->dec(empty_cell);
|
||||
|
||||
auto one_ref_cell = vm::CellBuilder().store_ref(empty_cell).finalize();
|
||||
boc->inc(one_ref_cell);
|
||||
boc->prepare_commit().ensure();
|
||||
boc->commit(storer).ensure();
|
||||
auto got_one_ref_cell = boc->load_cell(one_ref_cell->get_hash().as_slice()).move_as_ok();
|
||||
ASSERT_EQ(one_ref_cell->get_hash(), got_one_ref_cell->get_hash());
|
||||
boc = DynamicBagOfCellsDb::create_in_memory(kv.get(), {});
|
||||
|
||||
auto random_ref_cell = gen_random_cell(3, rnd);
|
||||
boc->inc(random_ref_cell);
|
||||
boc->prepare_commit().ensure();
|
||||
boc->commit(storer).ensure();
|
||||
auto got_random_ref_cell = boc->load_cell(random_ref_cell->get_hash().as_slice()).move_as_ok();
|
||||
ASSERT_EQ(random_ref_cell->get_hash(), got_random_ref_cell->get_hash());
|
||||
boc = DynamicBagOfCellsDb::create_in_memory(kv.get(), {});
|
||||
}
|
||||
|
||||
int VERBOSITY_NAME(boc) = VERBOSITY_NAME(DEBUG) + 10;
|
||||
|
||||
struct BocOptions {
|
||||
std::shared_ptr<ThreadExecutor> async_executor;
|
||||
std::optional<DynamicBagOfCellsDb::CreateInMemoryOptions> o_in_memory;
|
||||
td::uint64 seed{123};
|
||||
|
||||
auto create_dboc(td::KeyValueReader *kv, std::optional<td::int64> o_root_n) {
|
||||
if (o_in_memory) {
|
||||
auto res = DynamicBagOfCellsDb::create_in_memory(kv, *o_in_memory);
|
||||
auto stats = res->get_stats().move_as_ok();
|
||||
if (o_root_n) {
|
||||
ASSERT_EQ(*o_root_n, stats.roots_total_count);
|
||||
}
|
||||
VLOG(boc) << "reset roots_n=" << stats.roots_total_count << " cells_n=" << stats.cells_total_count;
|
||||
return res;
|
||||
}
|
||||
return DynamicBagOfCellsDb::create();
|
||||
};
|
||||
void prepare_commit(DynamicBagOfCellsDb &dboc) {
|
||||
if (async_executor) {
|
||||
async_executor->inc_generation();
|
||||
std::latch latch(1);
|
||||
td::Result<td::Unit> res;
|
||||
async_executor->execute_sync([&] {
|
||||
dboc.prepare_commit_async(async_executor, [&](auto r) {
|
||||
res = std::move(r);
|
||||
latch.count_down();
|
||||
});
|
||||
});
|
||||
latch.wait();
|
||||
async_executor->execute_sync([&] {});
|
||||
async_executor->inc_generation();
|
||||
} else {
|
||||
dboc.prepare_commit();
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
template <class F>
|
||||
void with_all_boc_options(F &&f, size_t tests_n = 500) {
|
||||
LOG(INFO) << "Test dynamic boc";
|
||||
auto counter = [] { return td::NamedThreadSafeCounter::get_default().get_counter("DataCell").sum(); };
|
||||
auto run = [&](BocOptions options) {
|
||||
LOG(INFO) << "\t" << (options.o_in_memory ? "in memory" : "on disk") << (options.async_executor ? " async" : "");
|
||||
if (options.o_in_memory) {
|
||||
LOG(INFO) << "\t\tuse_arena=" << options.o_in_memory->use_arena
|
||||
<< " less_memory=" << options.o_in_memory->use_less_memory_during_creation;
|
||||
}
|
||||
for (td::uint32 i = 0; i < tests_n; i++) {
|
||||
auto before = counter();
|
||||
options.seed = i == 0 ? 123 : i;
|
||||
f(options);
|
||||
auto after = counter();
|
||||
LOG_CHECK((options.o_in_memory && options.o_in_memory->use_arena) || before == after)
|
||||
<< before << " vs " << after;
|
||||
}
|
||||
};
|
||||
run({.async_executor = std::make_shared<ThreadExecutor>(4)});
|
||||
run({});
|
||||
for (auto use_arena : {false, true}) {
|
||||
for (auto less_memory : {false, true}) {
|
||||
run({.o_in_memory =
|
||||
DynamicBagOfCellsDb::CreateInMemoryOptions{.extra_threads = std::thread::hardware_concurrency(),
|
||||
.verbose = false,
|
||||
.use_arena = use_arena,
|
||||
.use_less_memory_during_creation = less_memory}});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
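`with_all_boc_options` above re-runs the same test body under every DynamicBagOfCellsDb configuration (on-disk, async executor, and the in-memory variants with and without arena allocation), checking the `DataCell` counter for leaks after each run. A stripped-down sketch of that run-under-every-configuration pattern:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Options {
  std::string name;
  bool in_memory = false;
  bool use_arena = false;
};

// Run one test body under every configuration and label the output; in the real
// harness the body would create a cell database according to 'opt'.
void run_with_all_options(const std::function<void(const Options &)> &test) {
  std::vector<Options> all = {{"on disk"}, {"in memory", true, false}, {"in memory + arena", true, true}};
  for (const auto &opt : all) {
    std::cout << "running under: " << opt.name << "\n";
    test(opt);
  }
}

int main() {
  run_with_all_options([](const Options &) { /* test body goes here */ });
}
```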
||||
void test_dynamic_boc(BocOptions options) {
|
||||
auto counter = [] { return td::NamedThreadSafeCounter::get_default().get_counter("DataCell").sum(); };
|
||||
auto before = counter();
|
||||
SCOPE_EXIT {
|
||||
LOG_CHECK((options.o_in_memory && options.o_in_memory->use_arena) || before == counter())
|
||||
<< before << " vs " << counter();
|
||||
};
|
||||
td::Random::Xorshift128plus rnd{options.seed};
|
||||
std::string old_root_hash;
|
||||
std::string old_root_serialization;
|
||||
auto kv = std::make_shared<td::MemoryKeyValue>();
|
||||
auto dboc = DynamicBagOfCellsDb::create();
|
||||
auto create_dboc = [&]() {
|
||||
auto roots_n = old_root_hash.empty() ? 0 : 1;
|
||||
return options.create_dboc(kv.get(), roots_n);
|
||||
};
|
||||
auto dboc = create_dboc();
|
||||
dboc->set_loader(std::make_unique<CellLoader>(kv));
|
||||
for (int t = 1000; t >= 0; t--) {
|
||||
if (rnd() % 10 == 0) {
|
||||
dboc = DynamicBagOfCellsDb::create();
|
||||
dboc = create_dboc();
|
||||
}
|
||||
dboc->set_loader(std::make_unique<CellLoader>(kv));
|
||||
Ref<Cell> old_root;
|
||||
|
@ -795,29 +1075,41 @@ TEST(TonDb, DynamicBoc) {
|
|||
if (t != 0) {
|
||||
dboc->inc(cell);
|
||||
}
|
||||
dboc->prepare_commit();
|
||||
dboc->prepare_commit().ensure();
|
||||
{
|
||||
CellStorer cell_storer(*kv);
|
||||
dboc->commit(cell_storer);
|
||||
dboc->commit(cell_storer).ensure();
|
||||
}
|
||||
}
|
||||
ASSERT_EQ(0u, kv->count("").ok());
|
||||
}
|
||||
|
||||
TEST(TonDb, DynamicBoc) {
|
||||
with_all_boc_options(test_dynamic_boc, 1);
|
||||
};
|
||||
|
||||
TEST(TonDb, DynamicBoc2) {
|
||||
int VERBOSITY_NAME(boc) = VERBOSITY_NAME(DEBUG) + 10;
|
||||
td::Random::Xorshift128plus rnd{123};
|
||||
int total_roots = 10000;
|
||||
int max_roots = 20;
|
||||
std::vector<std::string> root_hashes(max_roots);
|
||||
std::vector<Ref<Cell>> roots(max_roots);
|
||||
void test_dynamic_boc2(BocOptions options) {
|
||||
td::Random::Xorshift128plus rnd{options.seed};
|
||||
|
||||
int total_roots = rnd.fast(1, !rnd.fast(0, 10) * 100 + 10);
|
||||
int max_roots = rnd.fast(1, 20);
|
||||
int last_commit_at = 0;
|
||||
int first_root_id = 0;
|
||||
int last_root_id = 0;
|
||||
auto kv = std::make_shared<td::MemoryKeyValue>();
|
||||
auto dboc = DynamicBagOfCellsDb::create();
|
||||
auto create_dboc = [&](td::int64 root_n) { return options.create_dboc(kv.get(), root_n); };
|
||||
auto dboc = create_dboc(0);
|
||||
dboc->set_loader(std::make_unique<CellLoader>(kv));
|
||||
|
||||
auto counter = [] { return td::NamedThreadSafeCounter::get_default().get_counter("DataCell").sum(); };
|
||||
auto before = counter();
|
||||
SCOPE_EXIT{
|
||||
// LOG_CHECK((options.o_in_memory && options.o_in_memory->use_arena) || before == counter())
|
||||
// << before << " vs " << counter();
|
||||
};
|
||||
|
||||
std::vector<Ref<Cell>> roots(max_roots);
|
||||
std::vector<std::string> root_hashes(max_roots);
|
||||
auto add_root = [&](Ref<Cell> root) {
|
||||
dboc->inc(root);
|
||||
root_hashes[last_root_id % max_roots] = (root->get_hash().as_slice().str());
|
||||
|
@ -825,18 +1117,23 @@ TEST(TonDb, DynamicBoc2) {
|
|||
last_root_id++;
|
||||
};
|
||||
|
||||
auto get_root = [&](int root_id) {
|
||||
auto get_root = [&](int root_id) -> Ref<Cell> {
|
||||
VLOG(boc) << " from older root #" << root_id;
|
||||
auto from_root = roots[root_id % max_roots];
|
||||
if (from_root.is_null()) {
|
||||
VLOG(boc) << " from db";
|
||||
auto from_root_hash = root_hashes[root_id % max_roots];
|
||||
if (rnd() % 2 == 0) {
|
||||
from_root = dboc->load_root(from_root_hash).move_as_ok();
|
||||
} else {
|
||||
from_root = dboc->load_cell(from_root_hash).move_as_ok();
|
||||
}
|
||||
} else {
|
||||
VLOG(boc) << "FROM MEMORY";
|
||||
}
|
||||
return from_root;
|
||||
};
|
||||
std::map<CellHash, int> root_cnt;
|
||||
auto new_root = [&] {
|
||||
if (last_root_id == total_roots) {
|
||||
return;
|
||||
|
@ -850,13 +1147,16 @@ TEST(TonDb, DynamicBoc2) {
|
|||
from_root = get_root(rnd.fast(first_root_id, last_root_id - 1));
|
||||
}
|
||||
VLOG(boc) << " ...";
|
||||
add_root(gen_random_cell(rnd.fast(1, 20), from_root, rnd));
|
||||
auto new_root = gen_random_cell(rnd.fast(1, 20), from_root, rnd);
|
||||
root_cnt[new_root->get_hash()]++;
|
||||
add_root(std::move(new_root));
|
||||
VLOG(boc) << " OK";
|
||||
};
|
||||
|
||||
auto commit = [&] {
|
||||
VLOG(boc) << "commit";
|
||||
dboc->prepare_commit();
|
||||
//rnd.fast(0, 1);
|
||||
options.prepare_commit(*dboc);
|
||||
{
|
||||
CellStorer cell_storer(*kv);
|
||||
dboc->commit(cell_storer);
|
||||
|
@ -870,7 +1170,7 @@ TEST(TonDb, DynamicBoc2) {
|
|||
auto reset = [&] {
|
||||
VLOG(boc) << "reset";
|
||||
commit();
|
||||
dboc = DynamicBagOfCellsDb::create();
|
||||
dboc = create_dboc(td::int64(root_cnt.size()));
|
||||
dboc->set_loader(std::make_unique<CellLoader>(kv));
|
||||
};
|
||||
|
||||
|
@ -879,7 +1179,15 @@ TEST(TonDb, DynamicBoc2) {
|
|||
if (first_root_id == last_root_id) {
|
||||
return;
|
||||
}
|
||||
dboc->dec(get_root(first_root_id));
|
||||
auto old_root = get_root(first_root_id);
|
||||
auto it = root_cnt.find(old_root->get_hash());
|
||||
it->second--;
|
||||
CHECK(it->second >= 0);
|
||||
if (it->second == 0) {
|
||||
root_cnt.erase(it);
|
||||
}
|
||||
|
||||
dboc->dec(std::move(old_root));
|
||||
first_root_id++;
|
||||
VLOG(boc) << " OK";
|
||||
};
|
||||
|
@ -893,6 +1201,10 @@ TEST(TonDb, DynamicBoc2) {
|
|||
ASSERT_EQ(0u, kv->count("").ok());
|
||||
}
|
||||
|
||||
TEST(TonDb, DynamicBoc2) {
|
||||
with_all_boc_options(test_dynamic_boc2);
|
||||
}
|
||||
|
||||
template <class BocDeserializerT>
|
||||
td::Status test_boc_deserializer(std::vector<Ref<Cell>> cells, int mode) {
|
||||
auto total_data_cells_before = vm::DataCell::get_total_data_cells();
|
||||
|
@ -1848,7 +2160,7 @@ TEST(TonDb, CompactArrayOld) {
|
|||
SCOPE_EXIT {
|
||||
ton_db->commit_transaction(std::move(txn));
|
||||
};
|
||||
auto smart = txn->begin_smartcontract("");
|
||||
auto smart = txn->begin_smartcontract();
|
||||
SCOPE_EXIT {
|
||||
txn->commit_smartcontract(std::move(smart));
|
||||
};
|
||||
|
@ -1875,7 +2187,7 @@ TEST(TonDb, CompactArrayOld) {
|
|||
SCOPE_EXIT {
|
||||
ton_db->commit_transaction(std::move(txn));
|
||||
};
|
||||
auto smart = txn->begin_smartcontract("");
|
||||
auto smart = txn->begin_smartcontract();
|
||||
//smart->validate_meta();
|
||||
SCOPE_EXIT {
|
||||
txn->commit_smartcontract(std::move(smart));
|
||||
|
@ -1896,7 +2208,7 @@ TEST(TonDb, CompactArrayOld) {
|
|||
SCOPE_EXIT {
|
||||
ton_db->abort_transaction(std::move(txn));
|
||||
};
|
||||
auto smart = txn->begin_smartcontract("");
|
||||
auto smart = txn->begin_smartcontract();
|
||||
SCOPE_EXIT {
|
||||
txn->abort_smartcontract(std::move(smart));
|
||||
};
|
||||
|
@ -1950,17 +2262,18 @@ TEST(TonDb, BocRespectsUsageCell) {
|
|||
ASSERT_STREQ(serialization, serialization_of_virtualized_cell);
|
||||
}
|
||||
|
||||
TEST(TonDb, DynamicBocRespectsUsageCell) {
|
||||
td::Random::Xorshift128plus rnd(123);
|
||||
void test_dynamic_boc_respectes_usage_cell(vm::BocOptions options) {
|
||||
td::Random::Xorshift128plus rnd(options.seed);
|
||||
auto cell = vm::gen_random_cell(20, rnd, true);
|
||||
auto usage_tree = std::make_shared<vm::CellUsageTree>();
|
||||
auto usage_cell = vm::UsageCell::create(cell, usage_tree->root_ptr());
|
||||
|
||||
auto kv = std::make_shared<td::MemoryKeyValue>();
|
||||
auto dboc = vm::DynamicBagOfCellsDb::create();
|
||||
auto dboc = options.create_dboc(kv.get(), {});
|
||||
dboc->set_loader(std::make_unique<vm::CellLoader>(kv));
|
||||
dboc->inc(usage_cell);
|
||||
{
|
||||
options.prepare_commit(*dboc);
|
||||
vm::CellStorer cell_storer(*kv);
|
||||
dboc->commit(cell_storer);
|
||||
}
|
||||
|
@ -1972,6 +2285,42 @@ TEST(TonDb, DynamicBocRespectsUsageCell) {
|
|||
ASSERT_STREQ(serialization, serialization_of_virtualized_cell);
|
||||
}
|
||||
|
||||
TEST(TonDb, DynamicBocRespectsUsageCell) {
|
||||
vm::with_all_boc_options(test_dynamic_boc_respectes_usage_cell, 20);
|
||||
}
|
||||
|
||||
TEST(TonDb, LargeBocSerializer) {
|
||||
td::Random::Xorshift128plus rnd{123};
|
||||
size_t n = 1000000;
|
||||
std::vector<td::uint64> data(n);
|
||||
std::iota(data.begin(), data.end(), 0);
|
||||
vm::CompactArray arr(data);
|
||||
auto root = arr.root();
|
||||
std::string path = "serialization";
|
||||
td::unlink(path).ignore();
|
||||
auto fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
|
||||
.move_as_ok();
|
||||
std_boc_serialize_to_file(root, fd, 31);
|
||||
fd.close();
|
||||
auto a = td::read_file_str(path).move_as_ok();
|
||||
|
||||
auto kv = std::make_shared<td::MemoryKeyValue>();
|
||||
auto dboc = vm::DynamicBagOfCellsDb::create();
|
||||
dboc->set_loader(std::make_unique<vm::CellLoader>(kv));
|
||||
dboc->inc(root);
|
||||
dboc->prepare_commit();
|
||||
vm::CellStorer cell_storer(*kv);
|
||||
dboc->commit(cell_storer);
|
||||
dboc->set_loader(std::make_unique<vm::CellLoader>(kv));
|
||||
td::unlink(path).ignore();
|
||||
fd = td::FileFd::open(path, td::FileFd::Flags::Create | td::FileFd::Flags::Truncate | td::FileFd::Flags::Write)
|
||||
.move_as_ok();
|
||||
std_boc_serialize_to_file_large(dboc->get_cell_db_reader(), root->get_hash(), fd, 31);
|
||||
fd.close();
|
||||
auto b = td::read_file_str(path).move_as_ok();
|
||||
CHECK(a == b);
|
||||
}
|
||||
|
||||
TEST(TonDb, DoNotMakeListsPrunned) {
|
||||
auto cell = vm::CellBuilder().store_bytes("abc").finalize();
|
||||
auto is_prunned = [&](const td::Ref<vm::Cell> &cell) { return true; };
|
||||
|
@ -2020,7 +2369,7 @@ TEST(TonDb, CellStat) {
|
|||
ASSERT_EQ(stat.cells, new_stat.get_stat().cells);
|
||||
ASSERT_EQ(stat.bits, new_stat.get_stat().bits);
|
||||
|
||||
CHECK(usage_tree.unique());
|
||||
CHECK(usage_tree.use_count() == 1);
|
||||
usage_tree.reset();
|
||||
td::Ref<vm::Cell> C, BC, C_proof;
|
||||
std::shared_ptr<vm::CellUsageTree> usage_tree_B;
|
||||
|
@ -2057,7 +2406,6 @@ TEST(Ref, AtomicRef) {
|
|||
int threads_n = 10;
|
||||
std::vector<Node> nodes(threads_n);
|
||||
std::vector<td::thread> threads(threads_n);
|
||||
int thread_id = 0;
|
||||
for (auto &thread : threads) {
|
||||
thread = td::thread([&] {
|
||||
for (int i = 0; i < 1000000; i++) {
|
||||
|
@ -2072,7 +2420,6 @@ TEST(Ref, AtomicRef) {
|
|||
}
|
||||
}
|
||||
});
|
||||
thread_id++;
|
||||
}
|
||||
for (auto &thread : threads) {
|
||||
thread.join();
|
||||
|
|
|
@ -1316,7 +1316,7 @@ void CppTypeCode::clear_context() {
|
|||
std::string CppTypeCode::new_tmp_var() {
|
||||
char buffer[16];
|
||||
while (true) {
|
||||
sprintf(buffer, "t%d", ++tmp_ints);
|
||||
snprintf(buffer, sizeof(buffer), "t%d", ++tmp_ints);
|
||||
if (tmp_cpp_ids.is_good_ident(buffer) && local_cpp_ids.is_good_ident(buffer)) {
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -420,7 +420,7 @@ void AdmissibilityInfo::operator|=(const AdmissibilityInfo& other) {
|
|||
std::size_t i, j, n = info.size(), n1 = other.info.size();
|
||||
assert(n1 && !(n1 & (n1 - 1)));
|
||||
for (i = j = 0; i < n; i++) {
|
||||
info[i] = info[i] | other.info[j];
|
||||
info[i] = info[i] || other.info[j];
|
||||
j = (j + 1) & (n1 - 1);
|
||||
}
|
||||
}
|
||||
|
@ -2511,7 +2511,7 @@ void define_builtins() {
|
|||
Bits_type = define_builtin_type("bits", "#", false, 1023, 0, true, 0);
|
||||
for (int i = 1; i <= 257; i++) {
|
||||
char buff[8];
|
||||
sprintf(buff, "uint%d", i);
|
||||
snprintf(buff, sizeof(buff), "uint%d", i);
|
||||
define_builtin_type(buff + 1, "", false, i, i, true, -1);
|
||||
if (i < 257) {
|
||||
define_builtin_type(buff, "", false, i, i, true, 1);
|
||||
|
@ -2519,7 +2519,7 @@ void define_builtins() {
|
|||
}
|
||||
for (int i = 1; i <= 1023; i++) {
|
||||
char buff[12];
|
||||
sprintf(buff, "bits%d", i);
|
||||
snprintf(buff, sizeof(buff), "bits%d", i);
|
||||
define_builtin_type(buff, "", false, i, i, true, 0);
|
||||
}
|
||||
Eq_type = define_builtin_type("=", "##", false, 0, 0, true);
|
||||
|
|
crypto/util/mintless-proof-generator.cpp (new file, 395 lines)
@@ -0,0 +1,395 @@
|
|||
/*
|
||||
This file is part of TON Blockchain Library.
|
||||
|
||||
TON Blockchain Library is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Lesser General Public License as published by
|
||||
the Free Software Foundation, either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
TON Blockchain Library is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Lesser General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Lesser General Public License
|
||||
along with TON Blockchain Library. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "block-parse.h"
|
||||
#include "block.h"
|
||||
#include "td/actor/core/Actor.h"
|
||||
#include "td/db/utils/BlobView.h"
|
||||
|
||||
#include <iostream>
|
||||
#include "td/utils/OptionParser.h"
|
||||
#include "td/utils/Time.h"
|
||||
#include "td/utils/base64.h"
|
||||
#include "td/utils/filesystem.h"
|
||||
#include "td/utils/logging.h"
|
||||
#include "vm/cells/MerkleProof.h"
|
||||
#include "vm/db/StaticBagOfCellsDb.h"
|
||||
|
||||
#include <fstream>
|
||||
#include <common/delay.h>
|
||||
|
||||
const size_t KEY_LEN = 3 + 8 + 256;
|
||||
|
||||
void print_help() {
|
||||
std::cerr << "mintless-proof-generator - generates proofs for mintless jettons. Usage:\n\n";
|
||||
std::cerr << "mintless-proof-generator generate <input-list> <output-file>\n";
|
||||
std::cerr << " Generate a full tree for <input-list>, save boc to <output-file>.\n";
|
||||
std::cerr << " Input format: each line is <address> <amount> <start_from> <expired_at>.\n\n";
|
||||
std::cerr << "mintless-proof-generator make_proof <input-boc> <address> <output-file>.\n";
|
||||
std::cerr << " Generate a proof for address <address> from tree <input-boc>, save boc to file <output-file>.\n\n";
|
||||
std::cerr << "mintless-proof-generator parse <input-boc> <output-file>\n";
|
||||
std::cerr << " Read a tree from <input-boc> and output it as text to <output-file>.\n";
|
||||
std::cerr << " Output format: same as input for 'generate'.\n\n";
|
||||
std::cerr << "mintless-proof-generator make_all_proofs <input-boc> <output-file> [--threads <threads>]\n";
|
||||
std::cerr << " Read a tree from <input-boc> and output proofs for all accounts to <output-file>.\n";
|
||||
std::cerr << " Output format: <address>,<proof-base64>\n";
|
||||
std::cerr << " Default <threads>: 1\n";
|
||||
exit(2);
|
||||
}
|
||||
|
||||
void log_mem_stat() {
|
||||
auto r_stat = td::mem_stat();
|
||||
if (r_stat.is_error()) {
|
||||
LOG(WARNING) << "Memory: " << r_stat.move_as_error();
|
||||
return;
|
||||
}
|
||||
auto stat = r_stat.move_as_ok();
|
||||
LOG(WARNING) << "Memory: "
|
||||
<< "res=" << stat.resident_size_ << " (peak=" << stat.resident_size_peak_
|
||||
<< ") virt=" << stat.virtual_size_ << " (peak=" << stat.virtual_size_peak_ << ")";
|
||||
}
|
||||
|
||||
td::BitArray<KEY_LEN> address_to_key(const block::StdAddress &address) {
|
||||
// addr_std$10 anycast:(Maybe Anycast) workchain_id:int8 address:bits256 = MsgAddressInt;
|
||||
vm::CellBuilder cb;
|
||||
cb.store_long(0b100, 3);
|
||||
cb.store_long(address.workchain, 8);
|
||||
cb.store_bits(address.addr.as_bitslice());
|
||||
return cb.data_bits();
|
||||
}
|
||||
|
||||
block::StdAddress key_to_address(const td::BitArray<KEY_LEN> &key) {
|
||||
block::StdAddress addr;
|
||||
td::ConstBitPtr ptr = key.bits();
|
||||
LOG_CHECK(ptr.get_uint(3) == 0b100) << "Invalid address";
|
||||
ptr.advance(3);
|
||||
addr.workchain = (ton::WorkchainId)ptr.get_int(8);
|
||||
ptr.advance(8);
|
||||
addr.addr = ptr;
|
||||
return addr;
|
||||
}
|
||||
|
||||
struct Entry {
|
||||
block::StdAddress address;
|
||||
td::RefInt256 amount;
|
||||
td::uint64 start_from = 0, expired_at = 0;
|
||||
|
||||
td::BitArray<KEY_LEN> get_key() const {
|
||||
return address_to_key(address);
|
||||
}
|
||||
|
||||
td::Ref<vm::CellSlice> get_value() const {
|
||||
// _ amount:Coins start_from:uint48 expired_at:uint48 = AirdropItem;
|
||||
vm::CellBuilder cb;
|
||||
bool ok = block::tlb::t_Grams.store_integer_value(cb, *amount) && cb.store_ulong_rchk_bool(start_from, 48) &&
|
||||
cb.store_ulong_rchk_bool(expired_at, 48);
|
||||
LOG_CHECK(ok) << "Failed to serialize AirdropItem";
|
||||
return cb.as_cellslice_ref();
|
||||
}
|
||||
|
||||
static Entry parse(const td::BitArray<KEY_LEN> &key, vm::CellSlice value) {
|
||||
Entry e;
|
||||
e.address = key_to_address(key);
|
||||
bool ok = block::tlb::t_Grams.as_integer_skip_to(value, e.amount) && value.fetch_uint_to(48, e.start_from) &&
|
||||
value.fetch_uint_to(48, e.expired_at) && value.empty_ext();
|
||||
LOG_CHECK(ok) << "Failed to parse AirdropItem";
|
||||
return e;
|
||||
}
|
||||
};
|
||||
|
||||
bool read_entry(std::istream &f, Entry &entry) {
|
||||
std::string line;
|
||||
while (std::getline(f, line)) {
|
||||
std::vector<std::string> v = td::full_split(line, ' ');
|
||||
if (v.empty()) {
|
||||
continue;
|
||||
}
|
||||
auto S = [&]() -> td::Status {
|
||||
if (v.size() != 4) {
|
||||
return td::Status::Error("Invalid line in input");
|
||||
}
|
||||
TRY_RESULT_PREFIX_ASSIGN(entry.address, block::StdAddress::parse(v[0]), "Invalid address in input: ");
|
||||
entry.amount = td::string_to_int256(v[1]);
|
||||
if (entry.amount.is_null() || !entry.amount->is_valid() || entry.amount->sgn() < 0) {
|
||||
return td::Status::Error(PSTRING() << "Invalid amount in input: " << v[1]);
|
||||
}
|
||||
TRY_RESULT_PREFIX_ASSIGN(entry.start_from, td::to_integer_safe<td::uint64>(v[2]),
|
||||
"Invalid start_from in input: ");
|
||||
TRY_RESULT_PREFIX_ASSIGN(entry.expired_at, td::to_integer_safe<td::uint64>(v[3]),
|
||||
"Invalid expired_at in input: ");
|
||||
return td::Status::OK();
|
||||
}();
|
||||
S.ensure();
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
td::Status run_generate(std::string in_filename, std::string out_filename) {
|
||||
LOG(INFO) << "Generating tree from " << in_filename;
|
||||
std::ifstream in_file{in_filename};
|
||||
LOG_CHECK(in_file.is_open()) << "Cannot open file " << in_filename;
|
||||
|
||||
Entry entry;
|
||||
vm::Dictionary dict{KEY_LEN};
|
||||
td::uint64 count = 0;
|
||||
td::Timestamp log_at = td::Timestamp::in(5.0);
|
||||
while (read_entry(in_file, entry)) {
|
||||
++count;
|
||||
bool ok = dict.set(entry.get_key(), entry.get_value(), vm::DictionaryBase::SetMode::Add);
|
||||
LOG_CHECK(ok) << "Failed to add entry " << entry.address.rserialize() << " (line #" << count << ")";
|
||||
if (log_at.is_in_past()) {
|
||||
LOG(INFO) << "Added " << count << " entries";
|
||||
log_at = td::Timestamp::in(5.0);
|
||||
}
|
||||
}
|
||||
LOG_CHECK(in_file.eof()) << "Failed to read file " << in_filename;
|
||||
in_file.close();
|
||||
|
||||
LOG_CHECK(count != 0) << "Input is empty";
|
||||
td::Ref<vm::Cell> root = dict.get_root_cell();
|
||||
LOG(INFO) << "Total: " << count << " entries, root hash: " << root->get_hash().to_hex();
|
||||
vm::BagOfCells boc;
|
||||
boc.add_root(root);
|
||||
TRY_STATUS(boc.import_cells());
|
||||
LOG(INFO) << "Writing to " << out_filename;
|
||||
TRY_RESULT(fd, td::FileFd::open(out_filename, td::FileFd::Write | td::FileFd::Truncate | td::FileFd::Create));
|
||||
TRY_STATUS(boc.serialize_to_file(fd, 31));
|
||||
TRY_STATUS(fd.sync());
|
||||
fd.close();
|
||||
log_mem_stat();
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
td::Status run_make_proof(std::string in_filename, std::string s_address, std::string out_filename) {
|
||||
LOG(INFO) << "Generating proof for " << s_address << ", input file is " << in_filename;
|
||||
TRY_RESULT(address, block::StdAddress::parse(s_address));
|
||||
|
||||
TRY_RESULT(blob_view, td::FileBlobView::create(in_filename));
|
||||
TRY_RESULT(boc, vm::StaticBagOfCellsDbLazy::create(std::move(blob_view)));
|
||||
TRY_RESULT(root, boc->get_root_cell(0));
|
||||
|
||||
vm::MerkleProofBuilder mpb{root};
|
||||
vm::Dictionary dict{mpb.root(), KEY_LEN};
|
||||
auto key = address_to_key(address);
|
||||
td::Ref<vm::CellSlice> value = dict.lookup(key);
|
||||
LOG_CHECK(value.not_null()) << "No entry for address " << s_address;
|
||||
Entry e = Entry::parse(key, *value);
|
||||
LOG(INFO) << "Entry: address=" << e.address.workchain << ":" << e.address.addr.to_hex()
|
||||
<< " amount=" << e.amount->to_dec_string() << " start_from=" << e.start_from
|
||||
<< " expire_at=" << e.expired_at;
|
||||
|
||||
TRY_RESULT(proof, mpb.extract_proof_boc());
|
||||
LOG(INFO) << "Writing proof to " << out_filename << " (" << td::format::as_size(proof.size()) << ")";
|
||||
TRY_STATUS(td::write_file(out_filename, proof));
|
||||
log_mem_stat();
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
td::Status run_parse(std::string in_filename, std::string out_filename) {
|
||||
LOG(INFO) << "Parsing " << in_filename;
|
||||
std::ofstream out_file{out_filename};
|
||||
LOG_CHECK(out_file.is_open()) << "Cannot open file " << out_filename;
|
||||
|
||||
TRY_RESULT(blob_view, td::FileBlobView::create(in_filename));
|
||||
TRY_RESULT(boc, vm::StaticBagOfCellsDbLazy::create(std::move(blob_view)));
|
||||
TRY_RESULT(root, boc->get_root_cell(0));
|
||||
LOG(INFO) << "Root hash = " << root->get_hash().to_hex();
|
||||
vm::Dictionary dict{root, KEY_LEN};
|
||||
td::Timestamp log_at = td::Timestamp::in(5.0);
|
||||
td::uint64 count = 0;
|
||||
bool ok = dict.check_for_each([&](td::Ref<vm::CellSlice> value, td::ConstBitPtr key, int key_len) {
|
||||
CHECK(key_len == KEY_LEN);
|
||||
Entry e = Entry::parse(key, *value);
|
||||
out_file << e.address.workchain << ":" << e.address.addr.to_hex() << " " << e.amount->to_dec_string() << " "
|
||||
<< e.start_from << " " << e.expired_at << "\n";
|
||||
LOG_CHECK(!out_file.fail()) << "Failed to write to " << out_filename;
|
||||
++count;
|
||||
if (log_at.is_in_past()) {
|
||||
LOG(INFO) << "Parsed " << count << " entries";
|
||||
log_at = td::Timestamp::in(5.0);
|
||||
}
|
||||
return true;
|
||||
});
|
||||
LOG_CHECK(ok) << "Failed to parse dictionary";
|
||||
out_file.close();
|
||||
LOG_CHECK(!out_file.fail()) << "Failed to write to " << out_filename;
|
||||
LOG(INFO) << "Written " << count << " entries to " << out_filename;
|
||||
log_mem_stat();
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
class MakeAllProofsActor : public td::actor::core::Actor {
|
||||
public:
|
||||
MakeAllProofsActor(std::string in_filename, std::string out_filename, int max_workers)
|
||||
: in_filename_(in_filename), out_filename_(out_filename), max_workers_(max_workers) {
|
||||
}
|
||||
|
||||
void start_up() override {
|
||||
auto S = [&]() -> td::Status {
|
||||
out_file_.open(out_filename_);
|
||||
LOG_CHECK(out_file_.is_open()) << "Cannot open file " << out_filename_;
|
||||
LOG(INFO) << "Reading " << in_filename_;
|
||||
TRY_RESULT(blob_view, td::FileBlobView::create(in_filename_));
|
||||
TRY_RESULT(boc, vm::StaticBagOfCellsDbLazy::create(std::move(blob_view)));
|
||||
TRY_RESULT(root, boc->get_root_cell(0));
|
||||
LOG(INFO) << "Root hash = " << root->get_hash().to_hex();
|
||||
dict_ = vm::Dictionary{root, KEY_LEN};
|
||||
return td::Status::OK();
|
||||
}();
|
||||
S.ensure();
|
||||
run();
|
||||
alarm_timestamp() = td::Timestamp::in(5.0);
|
||||
}
|
||||
|
||||
void alarm() override {
|
||||
alarm_timestamp() = td::Timestamp::in(5.0);
|
||||
LOG(INFO) << "Processed " << written_count_ << " entries";
|
||||
}
|
||||
|
||||
void run() {
|
||||
for (auto it = pending_results_.begin(); it != pending_results_.end() && !it->second.empty();) {
|
||||
out_file_ << it->second << "\n";
|
||||
LOG_CHECK(!out_file_.fail()) << "Failed to write to " << out_filename_;
|
||||
it = pending_results_.erase(it);
|
||||
++written_count_;
|
||||
}
|
||||
while (active_workers_ < max_workers_ && !eof_) {
|
||||
td::Ref<vm::CellSlice> value = dict_.lookup_nearest_key(current_key_, true, current_idx_ == 0);
|
||||
if (value.is_null()) {
|
||||
eof_ = true;
|
||||
break;
|
||||
}
|
||||
run_worker(current_key_, current_idx_);
|
||||
++current_idx_;
|
||||
++active_workers_;
|
||||
}
|
||||
if (eof_ && active_workers_ == 0) {
|
||||
out_file_.close();
|
||||
LOG_CHECK(!out_file_.fail()) << "Failed to write to " << out_filename_;
|
||||
LOG(INFO) << "Written " << written_count_ << " entries to " << out_filename_;
|
||||
stop();
|
||||
td::actor::SchedulerContext::get()->stop();
|
||||
}
|
||||
}
|
||||
|
||||
void run_worker(td::BitArray<KEY_LEN> key, td::uint64 idx) {
|
||||
pending_results_[idx] = "";
|
||||
ton::delay_action(
|
||||
[SelfId = actor_id(this), key, idx, root = dict_.get_root_cell()]() {
|
||||
vm::MerkleProofBuilder mpb{root};
|
||||
CHECK(vm::Dictionary(mpb.root(), KEY_LEN).lookup(key).not_null());
|
||||
auto r_proof = mpb.extract_proof_boc();
|
||||
r_proof.ensure();
|
||||
block::StdAddress addr = key_to_address(key);
|
||||
std::string result = PSTRING() << addr.workchain << ":" << addr.addr.to_hex() << ","
|
||||
<< td::base64_encode(r_proof.move_as_ok());
|
||||
td::actor::send_closure(SelfId, &MakeAllProofsActor::on_result, idx, std::move(result));
|
||||
},
|
||||
td::Timestamp::now());
|
||||
}
|
||||
|
||||
void on_result(td::uint64 idx, std::string result) {
|
||||
pending_results_[idx] = std::move(result);
|
||||
--active_workers_;
|
||||
run();
|
||||
}
|
||||
|
||||
private:
|
||||
std::string in_filename_, out_filename_;
|
||||
int max_workers_;
|
||||
|
||||
std::ofstream out_file_;
|
||||
vm::Dictionary dict_{KEY_LEN};
|
||||
td::BitArray<KEY_LEN> current_key_ = td::BitArray<KEY_LEN>::zero();
|
||||
td::uint64 current_idx_ = 0;
|
||||
bool eof_ = false;
|
||||
int active_workers_ = 0;
|
||||
|
||||
std::map<td::uint64, std::string> pending_results_;
|
||||
td::uint64 written_count_ = 0;
|
||||
};
|
||||
|
||||
td::Status run_make_all_proofs(std::string in_filename, std::string out_filename, int threads) {
|
||||
td::actor::Scheduler scheduler({(size_t)threads});
|
||||
scheduler.run_in_context(
|
||||
[&] { td::actor::create_actor<MakeAllProofsActor>("proofs", in_filename, out_filename, threads).release(); });
|
||||
while (scheduler.run(1)) {
|
||||
}
|
||||
log_mem_stat();
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SET_VERBOSITY_LEVEL(verbosity_INFO);
|
||||
td::set_log_fatal_error_callback([](td::CSlice) { exit(2); });
|
||||
if (argc <= 1) {
|
||||
print_help();
|
||||
return 2;
|
||||
}
|
||||
|
||||
std::string command = argv[1];
|
||||
try {
|
||||
if (command == "generate") {
|
||||
if (argc != 4) {
|
||||
print_help();
|
||||
}
|
||||
run_generate(argv[2], argv[3]).ensure();
|
||||
return 0;
|
||||
}
|
||||
if (command == "make_proof") {
|
||||
if (argc != 5) {
|
||||
print_help();
|
||||
}
|
||||
run_make_proof(argv[2], argv[3], argv[4]).ensure();
|
||||
return 0;
|
||||
}
|
||||
if (command == "parse") {
|
||||
if (argc != 4) {
|
||||
print_help();
|
||||
}
|
||||
run_parse(argv[2], argv[3]).ensure();
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (command == "make_all_proofs") {
|
||||
std::vector<std::string> args;
|
||||
int threads = 1;
|
||||
for (int i = 2; i < argc; ++i) {
|
||||
if (!strcmp(argv[i], "--threads")) {
|
||||
++i;
|
||||
auto r = td::to_integer_safe<int>(td::as_slice(argv[i]));
|
||||
LOG_CHECK(r.is_ok() && r.ok() >= 1 && r.ok() <= 127) << "<threads> should be in [1..127]";
|
||||
threads = r.move_as_ok();
|
||||
} else {
|
||||
args.push_back(argv[i]);
|
||||
}
|
||||
}
|
||||
if (args.size() != 2) {
|
||||
print_help();
|
||||
}
|
||||
run_make_all_proofs(args[0], args[1], threads).ensure();
|
||||
return 0;
|
||||
}
|
||||
} catch (vm::VmError &e) {
|
||||
LOG(FATAL) << "VM error: " << e.get_msg();
|
||||
} catch (vm::VmVirtError &e) {
|
||||
LOG(FATAL) << "VM error: " << e.get_msg();
|
||||
}
|
||||
|
||||
LOG(FATAL) << "Unknown command '" << command << "'";
|
||||
}
|
|
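MakeAllProofsActor above completes proofs out of order (each worker runs as a delayed closure) but must write them to the output file strictly in key order, which it does by parking finished strings in pending_results_ keyed by index and flushing the longest ready prefix. Below is a minimal standalone sketch of that ordering idea; OrderedSink and its member names are illustrative and not part of the tool itself.

// Minimal sketch (plain C++, hypothetical names) of the "complete out of order,
// flush in order" pattern used by MakeAllProofsActor::run()/on_result().
#include <cstdint>
#include <iostream>
#include <map>
#include <string>
#include <utility>

class OrderedSink {
 public:
  void start(std::uint64_t idx) {
    pending_[idx] = "";  // reserve a slot so earlier indices block later ones
  }
  void complete(std::uint64_t idx, std::string result) {
    pending_[idx] = std::move(result);
    flush();
  }

 private:
  void flush() {
    // emit a maximal prefix of finished results, in index order
    for (auto it = pending_.begin(); it != pending_.end() && !it->second.empty();) {
      std::cout << it->second << "\n";
      it = pending_.erase(it);
    }
  }
  std::map<std::uint64_t, std::string> pending_;
};

int main() {
  OrderedSink sink;
  sink.start(0);
  sink.start(1);
  sink.complete(1, "proof for key 1");  // buffered: index 0 is not ready yet
  sink.complete(0, "proof for key 0");  // flushes both entries, in order
}

Like the original, this uses the empty string as the "not ready" sentinel, so an intentionally empty result would stall the flush; the real actor never produces one.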
@ -35,7 +35,7 @@ void Atom::print_to(std::ostream& os) const {

std::string Atom::make_name() const {
  char buffer[16];
  sprintf(buffer, "atom#%d", index_);
  snprintf(buffer, sizeof(buffer), "atom#%d", index_);
  return buffer;
}

@ -183,6 +183,9 @@ int BagOfCells::add_root(td::Ref<vm::Cell> add_root) {
|
|||
|
||||
// Changes in this function may require corresponding changes in crypto/vm/large-boc-serializer.cpp
|
||||
td::Status BagOfCells::import_cells() {
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->start_stage("import_cells");
|
||||
}
|
||||
cells_clear();
|
||||
for (auto& root : roots) {
|
||||
auto res = import_cell(root.cell, 0);
|
||||
|
@ -196,6 +199,9 @@ td::Status BagOfCells::import_cells() {
|
|||
//LOG(INFO) << "[cells: " << cell_count << ", refs: " << int_refs << ", bytes: " << data_bytes
|
||||
//<< ", internal hashes: " << int_hashes << ", top hashes: " << top_hashes << "]";
|
||||
CHECK(cell_count != 0);
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->finish_stage(PSLICE() << cell_count << " cells");
|
||||
}
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
|
@ -207,6 +213,9 @@ td::Result<int> BagOfCells::import_cell(td::Ref<vm::Cell> cell, int depth) {
|
|||
if (cell.is_null()) {
|
||||
return td::Status::Error("error while importing a cell into a bag of cells: cell is null");
|
||||
}
|
||||
if (logger_ptr_) {
|
||||
TRY_STATUS(logger_ptr_->on_cell_processed());
|
||||
}
|
||||
auto it = cells.find(cell->get_hash());
|
||||
if (it != cells.end()) {
|
||||
auto pos = it->second;
|
||||
|
@ -436,17 +445,19 @@ std::size_t BagOfCells::estimate_serialized_size(int mode) {
|
|||
return res.ok();
|
||||
}
|
||||
|
||||
BagOfCells& BagOfCells::serialize(int mode) {
|
||||
td::Status BagOfCells::serialize(int mode) {
|
||||
std::size_t size_est = estimate_serialized_size(mode);
|
||||
if (!size_est) {
|
||||
serialized.clear();
|
||||
return *this;
|
||||
return td::Status::OK();
|
||||
}
|
||||
serialized.resize(size_est);
|
||||
if (serialize_to(const_cast<unsigned char*>(serialized.data()), serialized.size(), mode) != size_est) {
|
||||
TRY_RESULT(size, serialize_to(const_cast<unsigned char*>(serialized.data()), serialized.size(), mode));
|
||||
if (size != size_est) {
|
||||
serialized.clear();
|
||||
return td::Status::Error("serialization failed");
|
||||
}
|
||||
return *this;
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
std::string BagOfCells::serialize_to_string(int mode) {
|
||||
|
@ -456,8 +467,8 @@ std::string BagOfCells::serialize_to_string(int mode) {
|
|||
}
|
||||
std::string res;
|
||||
res.resize(size_est, 0);
|
||||
if (serialize_to(const_cast<unsigned char*>(reinterpret_cast<const unsigned char*>(res.data())), res.size(), mode) ==
|
||||
res.size()) {
|
||||
if (serialize_to(const_cast<unsigned char*>(reinterpret_cast<const unsigned char*>(res.data())), res.size(), mode)
|
||||
.move_as_ok() == res.size()) {
|
||||
return res;
|
||||
} else {
|
||||
return {};
|
||||
|
@ -470,8 +481,9 @@ td::Result<td::BufferSlice> BagOfCells::serialize_to_slice(int mode) {
|
|||
return td::Status::Error("no cells to serialize to this bag of cells");
|
||||
}
|
||||
td::BufferSlice res(size_est);
|
||||
if (serialize_to(const_cast<unsigned char*>(reinterpret_cast<const unsigned char*>(res.data())), res.size(), mode) ==
|
||||
res.size()) {
|
||||
TRY_RESULT(size, serialize_to(const_cast<unsigned char*>(reinterpret_cast<const unsigned char*>(res.data())),
|
||||
res.size(), mode));
|
||||
if (size == res.size()) {
|
||||
return std::move(res);
|
||||
} else {
|
||||
return td::Status::Error("error while serializing a bag of cells: actual serialized size differs from estimated");
|
||||
|
@ -494,14 +506,10 @@ std::string BagOfCells::extract_string() const {
|
|||
// cell_data:(tot_cells_size * [ uint8 ])
|
||||
// = BagOfCells;
|
||||
// Changes in this function may require corresponding changes in crypto/vm/large-boc-serializer.cpp
|
||||
template<typename WriterT>
|
||||
std::size_t BagOfCells::serialize_to_impl(WriterT& writer, int mode) {
|
||||
auto store_ref = [&](unsigned long long value) {
|
||||
writer.store_uint(value, info.ref_byte_size);
|
||||
};
|
||||
auto store_offset = [&](unsigned long long value) {
|
||||
writer.store_uint(value, info.offset_byte_size);
|
||||
};
|
||||
template <typename WriterT>
|
||||
td::Result<std::size_t> BagOfCells::serialize_to_impl(WriterT& writer, int mode) {
|
||||
auto store_ref = [&](unsigned long long value) { writer.store_uint(value, info.ref_byte_size); };
|
||||
auto store_offset = [&](unsigned long long value) { writer.store_uint(value, info.offset_byte_size); };
|
||||
|
||||
writer.store_uint(info.magic, 4);
|
||||
|
||||
|
@ -536,6 +544,9 @@ std::size_t BagOfCells::serialize_to_impl(WriterT& writer, int mode) {
|
|||
DCHECK((unsigned)cell_count == cell_list_.size());
|
||||
if (info.has_index) {
|
||||
std::size_t offs = 0;
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->start_stage("generate_index");
|
||||
}
|
||||
for (int i = cell_count - 1; i >= 0; --i) {
|
||||
const Ref<DataCell>& dc = cell_list_[i].dc_ref;
|
||||
bool with_hash = (mode & Mode::WithIntHashes) && !cell_list_[i].wt;
|
||||
|
@ -548,11 +559,20 @@ std::size_t BagOfCells::serialize_to_impl(WriterT& writer, int mode) {
|
|||
fixed_offset = offs * 2 + cell_list_[i].should_cache;
|
||||
}
|
||||
store_offset(fixed_offset);
|
||||
if (logger_ptr_) {
|
||||
TRY_STATUS(logger_ptr_->on_cell_processed());
|
||||
}
|
||||
}
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->finish_stage("");
|
||||
}
|
||||
DCHECK(offs == info.data_size);
|
||||
}
|
||||
DCHECK(writer.position() == info.data_offset);
|
||||
size_t keep_position = writer.position();
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->start_stage("serialize");
|
||||
}
|
||||
for (int i = 0; i < cell_count; ++i) {
|
||||
const auto& dc_info = cell_list_[cell_count - 1 - i];
|
||||
const Ref<DataCell>& dc = dc_info.dc_ref;
|
||||
|
@ -572,6 +592,9 @@ std::size_t BagOfCells::serialize_to_impl(WriterT& writer, int mode) {
|
|||
// std::cerr << ' ' << k;
|
||||
}
|
||||
// std::cerr << std::endl;
|
||||
if (logger_ptr_) {
|
||||
TRY_STATUS(logger_ptr_->on_cell_processed());
|
||||
}
|
||||
}
|
||||
writer.chk();
|
||||
DCHECK(writer.position() - keep_position == info.data_size);
|
||||
|
@ -580,11 +603,14 @@ std::size_t BagOfCells::serialize_to_impl(WriterT& writer, int mode) {
|
|||
unsigned crc = writer.get_crc32();
|
||||
writer.store_uint(td::bswap32(crc), 4);
|
||||
}
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->finish_stage(PSLICE() << cell_count << " cells, " << writer.position() << " bytes");
|
||||
}
|
||||
DCHECK(writer.empty());
|
||||
return writer.position();
|
||||
}
|
||||
|
||||
std::size_t BagOfCells::serialize_to(unsigned char* buffer, std::size_t buff_size, int mode) {
|
||||
td::Result<std::size_t> BagOfCells::serialize_to(unsigned char* buffer, std::size_t buff_size, int mode) {
|
||||
std::size_t size_est = estimate_serialized_size(mode);
|
||||
if (!size_est || size_est > buff_size) {
|
||||
return 0;
|
||||
|
@ -599,7 +625,7 @@ td::Status BagOfCells::serialize_to_file(td::FileFd& fd, int mode) {
|
|||
return td::Status::Error("no cells to serialize to this bag of cells");
|
||||
}
|
||||
boc_writers::FileWriter writer{fd, size_est};
|
||||
size_t s = serialize_to_impl(writer, mode);
|
||||
TRY_RESULT(s, serialize_to_impl(writer, mode));
|
||||
TRY_STATUS(writer.finalize());
|
||||
if (s != size_est) {
|
||||
return td::Status::Error("error while serializing a bag of cells: actual serialized size differs from estimated");
|
||||
|
@ -1001,6 +1027,21 @@ td::Result<td::BufferSlice> std_boc_serialize_multi(std::vector<Ref<Cell>> roots
|
|||
}
|
||||
return boc.serialize_to_slice(mode);
|
||||
}
|
||||
td::Status std_boc_serialize_to_file(Ref<Cell> root, td::FileFd& fd, int mode,
|
||||
td::CancellationToken cancellation_token) {
|
||||
if (root.is_null()) {
|
||||
return td::Status::Error("cannot serialize a null cell reference into a bag of cells");
|
||||
}
|
||||
td::Timer timer;
|
||||
BagOfCellsLogger logger(std::move(cancellation_token));
|
||||
BagOfCells boc;
|
||||
boc.set_logger(&logger);
|
||||
boc.add_root(std::move(root));
|
||||
TRY_STATUS(boc.import_cells());
|
||||
TRY_STATUS(boc.serialize_to_file(fd, mode));
|
||||
LOG(ERROR) << "serialization took " << timer.elapsed() << "s";
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
/*
|
||||
*
|
||||
|
@ -1214,4 +1255,35 @@ bool VmStorageStat::add_storage(const CellSlice& cs) {
  return true;
}

static td::uint64 estimate_prunned_size() {
  return 41;
}

static td::uint64 estimate_serialized_size(const Ref<DataCell>& cell) {
  return cell->get_serialized_size() + cell->size_refs() * 3 + 3;
}

void ProofStorageStat::add_cell(const Ref<DataCell>& cell) {
  auto& status = cells_[cell->get_hash()];
  if (status == c_loaded) {
    return;
  }
  if (status == c_prunned) {
    proof_size_ -= estimate_prunned_size();
  }
  status = c_loaded;
  proof_size_ += estimate_serialized_size(cell);
  for (unsigned i = 0; i < cell->size_refs(); ++i) {
    auto& child_status = cells_[cell->get_ref(i)->get_hash()];
    if (child_status == c_none) {
      child_status = c_prunned;
      proof_size_ += estimate_prunned_size();
    }
  }
}

td::uint64 ProofStorageStat::estimate_proof_size() const {
  return proof_size_;
}

}  // namespace vm

|
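The new ProofStorageStat keeps one status per cell hash: every cell reported through add_cell() is counted at its full serialized size, while its children are counted once at the fixed pruned-branch estimate (41 bytes) until they themselves are added. A hedged sketch of a caller that walks only the top of a tree and lets everything below the cut stay "pruned" follows; the function name and depth-limited walk are illustrative, and the vm headers plus <vector>/<utility> are assumed to be included.

td::uint64 estimate_proof_size_up_to_depth(td::Ref<vm::Cell> root, int max_depth) {
  vm::ProofStorageStat stat;
  std::vector<std::pair<td::Ref<vm::Cell>, int>> stack;
  stack.emplace_back(std::move(root), 0);
  while (!stack.empty()) {
    auto [cell, depth] = std::move(stack.back());
    stack.pop_back();
    auto loaded = cell->load_cell().move_as_ok();  // sketch only: real code should propagate errors
    stat.add_cell(loaded.data_cell);               // counted at its full serialized size
    if (depth >= max_depth) {
      continue;  // children below the cut stay accounted as pruned branches (41 bytes each)
    }
    vm::CellSlice cs(vm::NoVm{}, std::move(cell));
    for (unsigned i = 0; i < cs.size_refs(); ++i) {
      stack.emplace_back(cs.prefetch_ref(i), depth + 1);
    }
  }
  return stat.estimate_proof_size();
}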
@ -27,6 +27,8 @@
#include "td/utils/buffer.h"
#include "td/utils/HashMap.h"
#include "td/utils/HashSet.h"
#include "td/utils/Time.h"
#include "td/utils/Timer.h"
#include "td/utils/port/FileFd.h"

namespace vm {

@ -163,6 +165,18 @@ struct VmStorageStat {
  }
};

class ProofStorageStat {
 public:
  void add_cell(const Ref<DataCell>& cell);
  td::uint64 estimate_proof_size() const;
 private:
  enum CellStatus {
    c_none = 0, c_prunned = 1, c_loaded = 2
  };
  std::map<vm::Cell::Hash, CellStatus> cells_;
  td::uint64 proof_size_ = 0;
};

struct CellSerializationInfo {
  bool special;
  Cell::LevelMask level_mask;

@ -187,6 +201,43 @@ struct CellSerializationInfo {
|
|||
td::Result<Ref<DataCell>> create_data_cell(td::Slice data, td::Span<Ref<Cell>> refs) const;
|
||||
};
|
||||
|
||||
class BagOfCellsLogger {
|
||||
public:
|
||||
BagOfCellsLogger() = default;
|
||||
explicit BagOfCellsLogger(td::CancellationToken cancellation_token)
|
||||
: cancellation_token_(std::move(cancellation_token)) {
|
||||
}
|
||||
|
||||
void start_stage(std::string stage) {
|
||||
log_speed_at_ = td::Timestamp::in(LOG_SPEED_PERIOD);
|
||||
processed_cells_ = 0;
|
||||
timer_ = {};
|
||||
stage_ = std::move(stage);
|
||||
}
|
||||
void finish_stage(td::Slice desc) {
|
||||
LOG(ERROR) << "serializer: " << stage_ << " took " << timer_.elapsed() << "s, " << desc;
|
||||
}
|
||||
td::Status on_cell_processed() {
|
||||
++processed_cells_;
|
||||
if (processed_cells_ % 1000 == 0) {
|
||||
TRY_STATUS(cancellation_token_.check());
|
||||
}
|
||||
if (log_speed_at_.is_in_past()) {
|
||||
log_speed_at_ += LOG_SPEED_PERIOD;
|
||||
LOG(WARNING) << "serializer: " << stage_ << " " << (double)processed_cells_ / LOG_SPEED_PERIOD << " cells/s";
|
||||
processed_cells_ = 0;
|
||||
}
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
private:
|
||||
std::string stage_;
|
||||
td::Timer timer_;
|
||||
td::CancellationToken cancellation_token_;
|
||||
td::Timestamp log_speed_at_;
|
||||
size_t processed_cells_ = 0;
|
||||
static constexpr double LOG_SPEED_PERIOD = 120.0;
|
||||
};
|
||||
class BagOfCells {
|
||||
public:
|
||||
enum { hash_bytes = vm::Cell::hash_bytes, default_max_roots = 16384 };
|
||||
|
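BagOfCellsLogger wraps a td::CancellationToken and is consulted once per processed cell, so a long import or serialization can be aborted from outside and its throughput is logged per stage. A hedged sketch of plumbing it through a serialization call, in the same spirit as std_boc_serialize_to_file() above, is shown below; the function name, the mode parameter, and the include set are assumptions of the sketch, not part of this change.

td::Status write_boc_cancellable(td::Ref<vm::Cell> root, td::FileFd& fd, int mode,
                                 td::CancellationToken token) {
  vm::BagOfCellsLogger logger(std::move(token));  // checks the token every 1000 cells and logs speed
  vm::BagOfCells boc;
  boc.set_logger(&logger);
  boc.add_root(std::move(root));
  TRY_STATUS(boc.import_cells());        // can now stop early if cancellation was requested
  return boc.serialize_to_file(fd, mode);
}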
@ -271,6 +322,7 @@ class BagOfCells {
|
|||
const unsigned char* index_ptr{nullptr};
|
||||
const unsigned char* data_ptr{nullptr};
|
||||
std::vector<unsigned long long> custom_index;
|
||||
BagOfCellsLogger* logger_ptr_{nullptr};
|
||||
|
||||
public:
|
||||
void clear();
|
||||
|
@ -280,14 +332,17 @@ class BagOfCells {
|
|||
int add_root(td::Ref<vm::Cell> add_root);
|
||||
td::Status import_cells() TD_WARN_UNUSED_RESULT;
|
||||
BagOfCells() = default;
|
||||
void set_logger(BagOfCellsLogger* logger_ptr) {
|
||||
logger_ptr_ = logger_ptr;
|
||||
}
|
||||
std::size_t estimate_serialized_size(int mode = 0);
|
||||
BagOfCells& serialize(int mode = 0);
|
||||
std::string serialize_to_string(int mode = 0);
|
||||
td::Status serialize(int mode = 0);
|
||||
td::string serialize_to_string(int mode = 0);
|
||||
td::Result<td::BufferSlice> serialize_to_slice(int mode = 0);
|
||||
std::size_t serialize_to(unsigned char* buffer, std::size_t buff_size, int mode = 0);
|
||||
td::Result<std::size_t> serialize_to(unsigned char* buffer, std::size_t buff_size, int mode = 0);
|
||||
td::Status serialize_to_file(td::FileFd& fd, int mode = 0);
|
||||
template<typename WriterT>
|
||||
std::size_t serialize_to_impl(WriterT& writer, int mode = 0);
|
||||
template <typename WriterT>
|
||||
td::Result<std::size_t> serialize_to_impl(WriterT& writer, int mode = 0);
|
||||
std::string extract_string() const;
|
||||
|
||||
td::Result<long long> deserialize(const td::Slice& data, int max_roots = default_max_roots);
|
||||
|
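With these declarations, serialize() returns td::Status and serialize_to()/serialize_to_impl() return td::Result<std::size_t>, so callers propagate failures instead of inspecting a returned size of zero. A short hedged sketch of caller-side code under the new signatures (the helper name is illustrative):

td::Result<td::BufferSlice> pack_root(td::Ref<vm::Cell> root, int mode) {
  vm::BagOfCells boc;
  boc.add_root(std::move(root));
  TRY_STATUS(boc.import_cells());       // td::Status, must be checked (TD_WARN_UNUSED_RESULT)
  return boc.serialize_to_slice(mode);  // failures now arrive as an error, not as an empty buffer
}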
@ -333,6 +388,8 @@ td::Result<std::vector<Ref<Cell>>> std_boc_deserialize_multi(td::Slice data,
|
|||
int max_roots = BagOfCells::default_max_roots);
|
||||
td::Result<td::BufferSlice> std_boc_serialize_multi(std::vector<Ref<Cell>> root, int mode = 0);
|
||||
|
||||
td::Status std_boc_serialize_to_file(Ref<Cell> root, td::FileFd& fd, int mode = 0,
|
||||
td::CancellationToken cancellation_token = {});
|
||||
td::Status std_boc_serialize_to_file_large(std::shared_ptr<CellDbReader> reader, Cell::Hash root_hash, td::FileFd& fd,
|
||||
int mode = 0, td::CancellationToken cancellation_token = {});
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
#pragma once
#include "common/refcnt.hpp"
#include "common/bitstring.h"
#include "td/utils/HashSet.h"

#include "vm/cells/CellHash.h"
#include "vm/cells/CellTraits.h"

@ -86,4 +87,31 @@ class Cell : public CellTraits {
};

std::ostream& operator<<(std::ostream& os, const Cell& c);

using is_transparent = void;  // Pred to use
inline vm::CellHash as_cell_hash(const Ref<Cell>& cell) {
  return cell->get_hash();
}
inline vm::CellHash as_cell_hash(td::Slice hash) {
  return vm::CellHash::from_slice(hash);
}
inline vm::CellHash as_cell_hash(vm::CellHash hash) {
  return hash;
}
struct CellEqF {
  using is_transparent = void;  // Pred to use
  template <class A, class B>
  bool operator()(const A& a, const B& b) const {
    return as_cell_hash(a) == as_cell_hash(b);
  }
};
struct CellHashF {
  using is_transparent = void;  // Pred to use
  using transparent_key_equal = CellEqF;
  template <class T>
  size_t operator()(const T& value) const {
    return cell_hash_slice_hash(as_cell_hash(value).as_slice());
  }
};
using CellHashSet = td::HashSet<td::Ref<Cell>, CellHashF, CellEqF>;
}  // namespace vm
|
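Because CellHashF and CellEqF are transparent and accept anything convertible through as_cell_hash(), a CellHashSet can in principle be probed with a CellHash (or a raw hash slice) without constructing a Ref<Cell> first. A hedged sketch of that heterogeneous lookup is below; whether the lookup key can be passed directly depends on the heterogeneous-lookup support of the underlying td::HashSet, so treat this as an illustration rather than a guaranteed API.

vm::CellHashSet seen;

void remember(td::Ref<vm::Cell> cell) {
  seen.insert(std::move(cell));
}

bool known(td::Slice raw_hash) {
  // no Ref<Cell> is constructed: CellHashF/CellEqF hash and compare the key directly
  return seen.find(vm::CellHash::from_slice(raw_hash)) != seen.end();
}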
|
@ -617,7 +617,7 @@ std::string CellBuilder::to_hex() const {
  int len = serialize(buff, sizeof(buff));
  char hex_buff[Cell::max_serialized_bytes * 2 + 1];
  for (int i = 0; i < len; i++) {
    sprintf(hex_buff + 2 * i, "%02x", buff[i]);
    snprintf(hex_buff + 2 * i, sizeof(hex_buff) - 2 * i, "%02x", buff[i]);
  }
  return hex_buff;
}

@ -74,13 +74,17 @@ struct CellHash {
};
}  // namespace vm

inline size_t cell_hash_slice_hash(td::Slice hash) {
  // use offset 8, because in db keys are grouped by first bytes.
  return td::as<size_t>(hash.substr(8, 8).ubegin());
}
namespace std {
template <>
struct hash<vm::CellHash> {
  typedef vm::CellHash argument_type;
  typedef std::size_t result_type;
  result_type operator()(argument_type const& s) const noexcept {
    return td::as<size_t>(s.as_slice().ubegin());
    return cell_hash_slice_hash(s.as_slice());
  }
};
}  // namespace std

@ -976,7 +976,7 @@ void CellSlice::dump(std::ostream& os, int level, bool endl) const {
  os << "; refs: " << refs_st << ".." << refs_en;
  if (level > 2) {
    char tmp[64];
    std::sprintf(tmp, "; ptr=data+%ld; z=%016llx",
    std::snprintf(tmp, sizeof(tmp), "; ptr=data+%ld; z=%016llx",
                  static_cast<long>(ptr && cell.not_null() ? ptr - cell->get_data() : -1), static_cast<long long>(z));
    os << tmp << " (have " << size() << " bits; " << zd << " preloaded)";
  }

@ -22,12 +22,12 @@ namespace vm {
//
// CellUsageTree::NodePtr
//
bool CellUsageTree::NodePtr::on_load() const {
bool CellUsageTree::NodePtr::on_load(const td::Ref<vm::DataCell>& cell) const {
  auto tree = tree_weak_.lock();
  if (!tree) {
    return false;
  }
  tree->on_load(node_id_);
  tree->on_load(node_id_, cell);
  return true;
}

@ -111,8 +111,14 @@ void CellUsageTree::set_use_mark_for_is_loaded(bool use_mark) {
  use_mark_ = use_mark;
}

void CellUsageTree::on_load(NodeId node_id) {
void CellUsageTree::on_load(NodeId node_id, const td::Ref<vm::DataCell>& cell) {
  if (nodes_[node_id].is_loaded) {
    return;
  }
  nodes_[node_id].is_loaded = true;
  if (cell_load_callback_) {
    cell_load_callback_(cell);
  }
}

CellUsageTree::NodeId CellUsageTree::create_child(NodeId node_id, unsigned ref_id) {

@ -22,8 +22,12 @@

#include "td/utils/int_types.h"
#include "td/utils/logging.h"
#include <functional>

namespace vm {

class DataCell;

class CellUsageTree : public std::enable_shared_from_this<CellUsageTree> {
 public:
  using NodeId = td::uint32;

@ -38,7 +42,7 @@ class CellUsageTree : public std::enable_shared_from_this<CellUsageTree> {
      return node_id_ == 0 || tree_weak_.expired();
    }

    bool on_load() const;
    bool on_load(const td::Ref<vm::DataCell>& cell) const;
    NodePtr create_child(unsigned ref_id) const;
    bool mark_path(CellUsageTree* master_tree) const;
    bool is_from_tree(const CellUsageTree* master_tree) const;

|
|||
void set_use_mark_for_is_loaded(bool use_mark = true);
|
||||
NodeId create_child(NodeId node_id, unsigned ref_id);
|
||||
|
||||
void set_cell_load_callback(std::function<void(const td::Ref<vm::DataCell>&)> f) {
|
||||
cell_load_callback_ = std::move(f);
|
||||
}
|
||||
|
||||
private:
|
||||
struct Node {
|
||||
bool is_loaded{false};
|
||||
|
@ -68,8 +76,9 @@ class CellUsageTree : public std::enable_shared_from_this<CellUsageTree> {
|
|||
};
|
||||
bool use_mark_{false};
|
||||
std::vector<Node> nodes_{2};
|
||||
std::function<void(const td::Ref<vm::DataCell>&)> cell_load_callback_;
|
||||
|
||||
void on_load(NodeId node_id);
|
||||
void on_load(NodeId node_id, const td::Ref<vm::DataCell>& cell);
|
||||
NodeId create_node(NodeId parent);
|
||||
};
|
||||
} // namespace vm
|
||||
|
|
|
@ -20,6 +20,15 @@
|
|||
|
||||
namespace vm {
|
||||
namespace detail {
|
||||
|
||||
template <class CellT>
|
||||
struct DefaultAllocator {
|
||||
template <class T, class... ArgsT>
|
||||
std::unique_ptr<CellT> make_unique(ArgsT&&... args) {
|
||||
return std::make_unique<T>(std::forward<ArgsT>(args)...);
|
||||
}
|
||||
};
|
||||
|
||||
template <class CellT, size_t Size = 0>
|
||||
class CellWithArrayStorage : public CellT {
|
||||
public:
|
||||
|
@ -29,14 +38,14 @@ class CellWithArrayStorage : public CellT {
|
|||
~CellWithArrayStorage() {
|
||||
CellT::destroy_storage(get_storage());
|
||||
}
|
||||
template <class... ArgsT>
|
||||
static std::unique_ptr<CellT> create(size_t storage_size, ArgsT&&... args) {
|
||||
template <class Allocator, class... ArgsT>
|
||||
static auto create(Allocator allocator, size_t storage_size, ArgsT&&... args) {
|
||||
static_assert(CellT::max_storage_size <= 40 * 8, "");
|
||||
//size = 128 + 32 + 8;
|
||||
auto size = (storage_size + 7) / 8;
|
||||
#define CASE(size) \
|
||||
case (size): \
|
||||
return std::make_unique<CellWithArrayStorage<CellT, (size)*8>>(std::forward<ArgsT>(args)...);
|
||||
return allocator. template make_unique<CellWithArrayStorage<CellT, (size) * 8>>(std::forward<ArgsT>(args)...);
|
||||
#define CASE2(offset) CASE(offset) CASE(offset + 1)
|
||||
#define CASE8(offset) CASE2(offset) CASE2(offset + 2) CASE2(offset + 4) CASE2(offset + 6)
|
||||
#define CASE32(offset) CASE8(offset) CASE8(offset + 8) CASE8(offset + 16) CASE8(offset + 24)
|
||||
|
@ -48,6 +57,10 @@ class CellWithArrayStorage : public CellT {
|
|||
LOG(FATAL) << "TOO BIG " << storage_size;
|
||||
UNREACHABLE();
|
||||
}
|
||||
template <class... ArgsT>
|
||||
static std::unique_ptr<CellT> create(size_t storage_size, ArgsT&&... args) {
|
||||
return create(DefaultAllocator<CellT>{}, storage_size, std::forward<ArgsT>(args)...);
|
||||
}
|
||||
|
||||
private:
|
||||
alignas(alignof(void*)) char storage_[Size];
|
||||
|
|
|
@ -25,7 +25,44 @@
|
|||
#include "vm/cells/CellWithStorage.h"
|
||||
|
||||
namespace vm {
|
||||
thread_local bool DataCell::use_arena = false;
|
||||
|
||||
namespace {
|
||||
template <class CellT>
|
||||
struct ArenaAllocator {
|
||||
template <class T, class... ArgsT>
|
||||
std::unique_ptr<CellT> make_unique(ArgsT&&... args) {
|
||||
auto* ptr = fast_alloc(sizeof(T));
|
||||
T* obj = new (ptr) T(std::forward<ArgsT>(args)...);
|
||||
return std::unique_ptr<T>(obj);
|
||||
}
|
||||
private:
|
||||
td::MutableSlice alloc_batch() {
|
||||
size_t batch_size = 1 << 20;
|
||||
auto batch = std::make_unique<char[]>(batch_size);
|
||||
return td::MutableSlice(batch.release(), batch_size);
|
||||
}
|
||||
char* fast_alloc(size_t size) {
|
||||
thread_local td::MutableSlice batch;
|
||||
auto aligned_size = (size + 7) / 8 * 8;
|
||||
if (batch.size() < size) {
|
||||
batch = alloc_batch();
|
||||
}
|
||||
auto res = batch.begin();
|
||||
batch.remove_prefix(aligned_size);
|
||||
return res;
|
||||
}
|
||||
};
|
||||
}
|
||||
std::unique_ptr<DataCell> DataCell::create_empty_data_cell(Info info) {
|
||||
if (use_arena) {
|
||||
ArenaAllocator<DataCell> allocator;
|
||||
auto res = detail::CellWithArrayStorage<DataCell>::create(allocator, info.get_storage_size(), info);
|
||||
// this is dangerous
|
||||
Ref<DataCell>(res.get()).release();
|
||||
return res;
|
||||
}
|
||||
|
||||
return detail::CellWithUniquePtrStorage<DataCell>::create(info.get_storage_size(), info);
|
||||
}
|
||||
|
||||
|
@ -359,7 +396,7 @@ std::string DataCell::to_hex() const {
|
|||
int len = serialize(buff, sizeof(buff));
|
||||
char hex_buff[max_serialized_bytes * 2 + 1];
|
||||
for (int i = 0; i < len; i++) {
|
||||
sprintf(hex_buff + 2 * i, "%02x", buff[i]);
|
||||
snprintf(hex_buff + 2 * i, sizeof(hex_buff) - 2 * i, "%02x", buff[i]);
|
||||
}
|
||||
return hex_buff;
|
||||
}
|
||||
|
|
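The ArenaAllocator introduced above is a thread-local bump allocator: it hands out 8-byte-aligned chunks from 1 MiB batches that are deliberately never freed, trading a permanent leak (see the "cells created with use_arena=true are never freed" note in DataCell.h below) for cheap allocation during bulk cell imports. Here is a self-contained sketch of the same bump-pointer idea with hypothetical names; the real allocator additionally constructs the cell in place and returns a std::unique_ptr<DataCell>.

// Standalone sketch of the bump-pointer ("arena") idea; class and constant names are illustrative.
#include <cstddef>

class Bump {
 public:
  char* alloc(std::size_t size) {
    std::size_t aligned = (size + 7) / 8 * 8;        // keep 8-byte alignment, like fast_alloc() above
    if (batch_ == nullptr || pos_ + aligned > kBatch) {
      batch_ = new char[kBatch];                     // deliberately never freed: allocations outlive the allocator
      pos_ = 0;
    }
    char* res = batch_ + pos_;
    pos_ += aligned;
    return res;                                      // assumes size <= kBatch, which holds for cells
  }

 private:
  static constexpr std::size_t kBatch = 1 << 20;     // 1 MiB batches, as in alloc_batch()
  char* batch_ = nullptr;
  std::size_t pos_ = 0;
};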
|
@ -27,6 +27,9 @@ namespace vm {
|
|||
|
||||
class DataCell : public Cell {
|
||||
public:
|
||||
// NB: cells created with use_arena=true are never freed
|
||||
static thread_local bool use_arena;
|
||||
|
||||
DataCell(const DataCell& other) = delete;
|
||||
~DataCell() override;
|
||||
|
||||
|
@ -121,10 +124,6 @@ class DataCell : public Cell {
|
|||
void destroy_storage(char* storage);
|
||||
|
||||
explicit DataCell(Info info);
|
||||
Cell* get_ref_raw_ptr(unsigned idx) const {
|
||||
DCHECK(idx < get_refs_cnt());
|
||||
return info_.get_refs(get_storage())[idx];
|
||||
}
|
||||
|
||||
public:
|
||||
td::Result<LoadedCell> load_cell() const override {
|
||||
|
@ -152,6 +151,20 @@ class DataCell : public Cell {
|
|||
return Ref<Cell>(get_ref_raw_ptr(idx));
|
||||
}
|
||||
|
||||
Cell* get_ref_raw_ptr(unsigned idx) const {
|
||||
DCHECK(idx < get_refs_cnt());
|
||||
return info_.get_refs(get_storage())[idx];
|
||||
}
|
||||
|
||||
Ref<Cell> reset_ref_unsafe(unsigned idx, Ref<Cell> ref, bool check_hash = true) {
|
||||
CHECK(idx < get_refs_cnt());
|
||||
auto refs = info_.get_refs(get_storage());
|
||||
CHECK(!check_hash || refs[idx]->get_hash() == ref->get_hash());
|
||||
auto res = Ref<Cell>(refs[idx], Ref<Cell>::acquire_t{}); // call destructor
|
||||
refs[idx] = ref.release();
|
||||
return res;
|
||||
}
|
||||
|
||||
td::uint32 get_virtualization() const override {
|
||||
return info_.virtualization_;
|
||||
}
|
||||
|
@ -173,6 +186,9 @@ class DataCell : public Cell {
|
|||
return ((get_bits() + 23) >> 3) +
|
||||
(with_hashes ? get_level_mask().get_hashes_count() * (hash_bytes + depth_bytes) : 0);
|
||||
}
|
||||
size_t get_storage_size() const {
|
||||
return info_.get_storage_size();
|
||||
}
|
||||
int serialize(unsigned char* buff, int buff_size, bool with_hashes = false) const;
|
||||
std::string serialize() const;
|
||||
std::string to_hex() const;
|
||||
|
@ -207,6 +223,9 @@ class DataCell : public Cell {
|
|||
};
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const DataCell& c);
|
||||
inline CellHash as_cell_hash(const Ref<DataCell>& cell) {
|
||||
return cell->get_hash();
|
||||
}
|
||||
|
||||
} // namespace vm
|
||||
|
||||
|
|
|
@ -66,6 +66,10 @@ class MerkleProofBuilder {
|
|||
td::Result<Ref<Cell>> extract_proof() const;
|
||||
bool extract_proof_to(Ref<Cell> &proof_root) const;
|
||||
td::Result<td::BufferSlice> extract_proof_boc() const;
|
||||
|
||||
void set_cell_load_callback(std::function<void(const td::Ref<vm::DataCell>&)> f) {
|
||||
usage_tree->set_cell_load_callback(std::move(f));
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace vm
|
||||
|
|
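MerkleProofBuilder::set_cell_load_callback() forwards to the usage tree, so the callback fires once per distinct cell the first time it is loaded while building a proof. A hedged sketch of combining it with ProofStorageStat to keep a running bound on the eventual proof size follows; the function name, the size limit, and the elided traversal are assumptions of the sketch.

td::Result<td::BufferSlice> proof_with_size_limit(td::Ref<vm::Cell> root, td::uint64 max_bytes) {
  vm::MerkleProofBuilder mpb{std::move(root)};
  vm::ProofStorageStat stat;
  mpb.set_cell_load_callback([&](const td::Ref<vm::DataCell>& cell) { stat.add_cell(cell); });

  // ... walk mpb.root() here; every cell actually loaded is reported to `stat` ...

  if (stat.estimate_proof_size() > max_bytes) {
    return td::Status::Error("proof would exceed the size limit");
  }
  return mpb.extract_proof_boc();
}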
|
@ -30,18 +30,27 @@ struct PrunnedCellInfo {
|
|||
template <class ExtraT>
|
||||
class PrunnedCell : public Cell {
|
||||
public:
|
||||
ExtraT& get_extra() {
|
||||
return extra_;
|
||||
}
|
||||
const ExtraT& get_extra() const {
|
||||
return extra_;
|
||||
}
|
||||
|
||||
static td::Result<Ref<PrunnedCell<ExtraT>>> create(const PrunnedCellInfo& prunned_cell_info, ExtraT&& extra) {
|
||||
return create(detail::DefaultAllocator<PrunnedCell<ExtraT>>(), prunned_cell_info, std::forward<ExtraT>(extra));
|
||||
}
|
||||
|
||||
template <class AllocatorT>
|
||||
static td::Result<Ref<PrunnedCell<ExtraT>>> create(AllocatorT allocator, const PrunnedCellInfo& prunned_cell_info,
|
||||
ExtraT&& extra) {
|
||||
auto level_mask = prunned_cell_info.level_mask;
|
||||
if (level_mask.get_level() > max_level) {
|
||||
return td::Status::Error("Level is too big");
|
||||
}
|
||||
Info info(level_mask);
|
||||
auto prunned_cell =
|
||||
detail::CellWithUniquePtrStorage<PrunnedCell<ExtraT>>::create(info.get_storage_size(), info, std::move(extra));
|
||||
detail::CellWithArrayStorage<PrunnedCell<ExtraT>>::create(allocator, info.get_storage_size(), info, std::move(extra));
|
||||
TRY_STATUS(prunned_cell->init(prunned_cell_info));
|
||||
return Ref<PrunnedCell<ExtraT>>(prunned_cell.release(), typename Ref<PrunnedCell<ExtraT>>::acquire_t{});
|
||||
}
|
||||
|
@ -51,6 +60,7 @@ class PrunnedCell : public Cell {
|
|||
}
|
||||
|
||||
protected:
|
||||
static constexpr auto max_storage_size = (max_level + 1) * (hash_bytes + sizeof(td::uint16));
|
||||
struct Info {
|
||||
Info(LevelMask level_mask) {
|
||||
level_mask_ = level_mask.get_mask() & 7;
|
||||
|
|
|
@ -39,7 +39,7 @@ class UsageCell : public Cell {
  // load interface
  td::Result<LoadedCell> load_cell() const override {
    TRY_RESULT(loaded_cell, cell_->load_cell());
    if (tree_node_.on_load()) {
    if (tree_node_.on_load(loaded_cell.data_cell)) {
      CHECK(loaded_cell.tree_node.empty());
      loaded_cell.tree_node = tree_node_;
    }

@ -27,8 +27,8 @@
|
|||
|
||||
namespace vm {
|
||||
|
||||
int Continuation::jump_w(VmState* st) & {
|
||||
return static_cast<const Continuation*>(this)->jump(st);
|
||||
td::Ref<Continuation> Continuation::jump_w(VmState* st, int& exitcode) & {
|
||||
return static_cast<const Continuation*>(this)->jump(st, exitcode);
|
||||
}
|
||||
|
||||
bool Continuation::has_c0() const {
|
||||
|
@ -286,7 +286,7 @@ std::string QuitCont::type() const {
|
|||
return "vmc_quit";
|
||||
}
|
||||
|
||||
int ExcQuitCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> ExcQuitCont::jump(VmState* st, int& exitcode) const& {
|
||||
int n = 0;
|
||||
try {
|
||||
n = st->get_stack().pop_smallint_range(0xffff);
|
||||
|
@ -294,7 +294,8 @@ int ExcQuitCont::jump(VmState* st) const & {
|
|||
n = vme.get_errno();
|
||||
}
|
||||
VM_LOG(st) << "default exception handler, terminating vm with exit code " << n;
|
||||
return ~n;
|
||||
exitcode = ~n;
|
||||
return {};
|
||||
}
|
||||
|
||||
std::string ExcQuitCont::type() const {
|
||||
|
@ -311,16 +312,16 @@ Ref<ExcQuitCont> ExcQuitCont::deserialize(CellSlice& cs, int mode) {
|
|||
return cs.fetch_ulong(4) == 9 ? Ref<ExcQuitCont>{true} : Ref<ExcQuitCont>{};
|
||||
}
|
||||
|
||||
int PushIntCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> PushIntCont::jump(VmState* st, int& exitcode) const& {
|
||||
VM_LOG(st) << "execute implicit PUSH " << push_val << " (slow)";
|
||||
st->get_stack().push_smallint(push_val);
|
||||
return st->jump(next);
|
||||
return next;
|
||||
}
|
||||
|
||||
int PushIntCont::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> PushIntCont::jump_w(VmState* st, int& exitcode) & {
|
||||
VM_LOG(st) << "execute implicit PUSH " << push_val;
|
||||
st->get_stack().push_smallint(push_val);
|
||||
return st->jump(std::move(next));
|
||||
return std::move(next);
|
||||
}
|
||||
|
||||
std::string PushIntCont::type() const {
|
||||
|
@ -345,20 +346,20 @@ Ref<PushIntCont> PushIntCont::deserialize(CellSlice& cs, int mode) {
|
|||
}
|
||||
}
|
||||
|
||||
int ArgContExt::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> ArgContExt::jump(VmState* st, int& exitcode) const& {
|
||||
st->adjust_cr(data.save);
|
||||
if (data.cp != -1) {
|
||||
st->force_cp(data.cp);
|
||||
}
|
||||
return ext->jump(st);
|
||||
return ext;
|
||||
}
|
||||
|
||||
int ArgContExt::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> ArgContExt::jump_w(VmState* st, int& exitcode) & {
|
||||
st->adjust_cr(std::move(data.save));
|
||||
if (data.cp != -1) {
|
||||
st->force_cp(data.cp);
|
||||
}
|
||||
return st->jump_to(std::move(ext));
|
||||
return std::move(ext);
|
||||
}
|
||||
|
||||
bool ArgContExt::serialize(CellBuilder& cb) const {
|
||||
|
@ -382,32 +383,32 @@ std::string ArgContExt::type() const {
|
|||
return "vmc_envelope";
|
||||
}
|
||||
|
||||
int RepeatCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> RepeatCont::jump(VmState* st, int& exitcode) const& {
|
||||
VM_LOG(st) << "repeat " << count << " more times (slow)\n";
|
||||
if (count <= 0) {
|
||||
return st->jump(after);
|
||||
return after;
|
||||
}
|
||||
if (body->has_c0()) {
|
||||
return st->jump(body);
|
||||
return body;
|
||||
}
|
||||
st->set_c0(Ref<RepeatCont>{true, body, after, count - 1});
|
||||
return st->jump(body);
|
||||
return body;
|
||||
}
|
||||
|
||||
int RepeatCont::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> RepeatCont::jump_w(VmState* st, int& exitcode) & {
|
||||
VM_LOG(st) << "repeat " << count << " more times\n";
|
||||
if (count <= 0) {
|
||||
body.clear();
|
||||
return st->jump(std::move(after));
|
||||
return std::move(after);
|
||||
}
|
||||
if (body->has_c0()) {
|
||||
after.clear();
|
||||
return st->jump(std::move(body));
|
||||
return std::move(body);
|
||||
}
|
||||
// optimization: since this is unique, reuse *this instead of creating new object
|
||||
--count;
|
||||
st->set_c0(Ref<RepeatCont>{this});
|
||||
return st->jump(body);
|
||||
return body;
|
||||
}
|
||||
|
||||
bool RepeatCont::serialize(CellBuilder& cb) const {
|
||||
|
@ -443,21 +444,21 @@ int VmState::repeat(Ref<Continuation> body, Ref<Continuation> after, long long c
|
|||
}
|
||||
}
|
||||
|
||||
int AgainCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> AgainCont::jump(VmState* st, int& exitcode) const& {
|
||||
VM_LOG(st) << "again an infinite loop iteration (slow)\n";
|
||||
if (!body->has_c0()) {
|
||||
st->set_c0(Ref<AgainCont>{this});
|
||||
}
|
||||
return st->jump(body);
|
||||
return body;
|
||||
}
|
||||
|
||||
int AgainCont::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> AgainCont::jump_w(VmState* st, int& exitcode) & {
|
||||
VM_LOG(st) << "again an infinite loop iteration\n";
|
||||
if (!body->has_c0()) {
|
||||
st->set_c0(Ref<AgainCont>{this});
|
||||
return st->jump(body);
|
||||
return body;
|
||||
} else {
|
||||
return st->jump(std::move(body));
|
||||
return std::move(body);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -485,31 +486,31 @@ int VmState::again(Ref<Continuation> body) {
|
|||
return jump(Ref<AgainCont>{true, std::move(body)});
|
||||
}
|
||||
|
||||
int UntilCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> UntilCont::jump(VmState* st, int& exitcode) const& {
|
||||
VM_LOG(st) << "until loop body end (slow)\n";
|
||||
if (st->get_stack().pop_bool()) {
|
||||
VM_LOG(st) << "until loop terminated\n";
|
||||
return st->jump(after);
|
||||
return after;
|
||||
}
|
||||
if (!body->has_c0()) {
|
||||
st->set_c0(Ref<UntilCont>{this});
|
||||
}
|
||||
return st->jump(body);
|
||||
return body;
|
||||
}
|
||||
|
||||
int UntilCont::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> UntilCont::jump_w(VmState* st, int& exitcode) & {
|
||||
VM_LOG(st) << "until loop body end\n";
|
||||
if (st->get_stack().pop_bool()) {
|
||||
VM_LOG(st) << "until loop terminated\n";
|
||||
body.clear();
|
||||
return st->jump(std::move(after));
|
||||
return std::move(after);
|
||||
}
|
||||
if (!body->has_c0()) {
|
||||
st->set_c0(Ref<UntilCont>{this});
|
||||
return st->jump(body);
|
||||
return body;
|
||||
} else {
|
||||
after.clear();
|
||||
return st->jump(std::move(body));
|
||||
return std::move(body);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -541,54 +542,54 @@ int VmState::until(Ref<Continuation> body, Ref<Continuation> after) {
|
|||
return jump(std::move(body));
|
||||
}
|
||||
|
||||
int WhileCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> WhileCont::jump(VmState* st, int& exitcode) const& {
|
||||
if (chkcond) {
|
||||
VM_LOG(st) << "while loop condition end (slow)\n";
|
||||
if (!st->get_stack().pop_bool()) {
|
||||
VM_LOG(st) << "while loop terminated\n";
|
||||
return st->jump(after);
|
||||
return after;
|
||||
}
|
||||
if (!body->has_c0()) {
|
||||
st->set_c0(Ref<WhileCont>{true, cond, body, after, false});
|
||||
}
|
||||
return st->jump(body);
|
||||
return body;
|
||||
} else {
|
||||
VM_LOG(st) << "while loop body end (slow)\n";
|
||||
if (!cond->has_c0()) {
|
||||
st->set_c0(Ref<WhileCont>{true, cond, body, after, true});
|
||||
}
|
||||
return st->jump(cond);
|
||||
return cond;
|
||||
}
|
||||
}
|
||||
|
||||
int WhileCont::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> WhileCont::jump_w(VmState* st, int& exitcode) & {
|
||||
if (chkcond) {
|
||||
VM_LOG(st) << "while loop condition end\n";
|
||||
if (!st->get_stack().pop_bool()) {
|
||||
VM_LOG(st) << "while loop terminated\n";
|
||||
cond.clear();
|
||||
body.clear();
|
||||
return st->jump(std::move(after));
|
||||
return std::move(after);
|
||||
}
|
||||
if (!body->has_c0()) {
|
||||
chkcond = false; // re-use current object since we hold the unique pointer to it
|
||||
st->set_c0(Ref<WhileCont>{this});
|
||||
return st->jump(body);
|
||||
return body;
|
||||
} else {
|
||||
cond.clear();
|
||||
after.clear();
|
||||
return st->jump(std::move(body));
|
||||
return std::move(body);
|
||||
}
|
||||
} else {
|
||||
VM_LOG(st) << "while loop body end\n";
|
||||
if (!cond->has_c0()) {
|
||||
chkcond = true; // re-use current object
|
||||
st->set_c0(Ref<WhileCont>{this});
|
||||
return st->jump(cond);
|
||||
return cond;
|
||||
} else {
|
||||
body.clear();
|
||||
after.clear();
|
||||
return st->jump(std::move(cond));
|
||||
return std::move(cond);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -627,16 +628,16 @@ int VmState::loop_while(Ref<Continuation> cond, Ref<Continuation> body, Ref<Cont
|
|||
return jump(std::move(cond));
|
||||
}
|
||||
|
||||
int OrdCont::jump(VmState* st) const & {
|
||||
td::Ref<Continuation> OrdCont::jump(VmState* st, int& exitcode) const& {
|
||||
st->adjust_cr(data.save);
|
||||
st->set_code(code, data.cp);
|
||||
return 0;
|
||||
return {};
|
||||
}
|
||||
|
||||
int OrdCont::jump_w(VmState* st) & {
|
||||
td::Ref<Continuation> OrdCont::jump_w(VmState* st, int& exitcode) & {
|
||||
st->adjust_cr(std::move(data.save));
|
||||
st->set_code(std::move(code), data.cp);
|
||||
return 0;
|
||||
return {};
|
||||
}
|
||||
|
||||
bool OrdCont::serialize(CellBuilder& cb) const {
|
||||
|
|
|
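The signature change running through this file turns continuation dispatch into a trampoline: instead of each jump() recursively calling st->jump(next), a handler now returns the continuation to run next, or a null reference when it either installed ordinary code (OrdCont via st->set_code) or terminated the VM by writing into the exitcode out-parameter (QuitCont, ExcQuitCont). The loop below is only an illustration of that contract, not the real dispatcher, which lives in VmState.

int drive(vm::VmState* st, td::Ref<vm::Continuation> cont) {
  int exitcode = 0;
  while (cont.not_null()) {
    // jump_w() is the destructive variant a caller may use when it holds the only reference
    cont = cont->jump(st, exitcode);
  }
  // A null result with exitcode still 0 means an OrdCont installed ordinary code via
  // st->set_code() and the byte-code interpreter takes over; QuitCont/ExcQuitCont
  // terminate instead by storing ~exit_code into exitcode.
  return exitcode;
}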
@ -161,8 +161,8 @@ struct ControlData {
|
|||
|
||||
class Continuation : public td::CntObject {
|
||||
public:
|
||||
virtual int jump(VmState* st) const & = 0;
|
||||
virtual int jump_w(VmState* st) &;
|
||||
virtual td::Ref<Continuation> jump(VmState* st, int& exitcode) const& = 0;
|
||||
virtual td::Ref<Continuation> jump_w(VmState* st, int& exitcode) &;
|
||||
virtual ControlData* get_cdata() {
|
||||
return 0;
|
||||
}
|
||||
|
@ -203,8 +203,9 @@ class QuitCont : public Continuation {
|
|||
QuitCont(int _code = 0) : exit_code(_code) {
|
||||
}
|
||||
~QuitCont() override = default;
|
||||
int jump(VmState* st) const & override {
|
||||
return ~exit_code;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override {
|
||||
exitcode = ~exit_code;
|
||||
return {};
|
||||
}
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<QuitCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
|
@ -215,7 +216,7 @@ class ExcQuitCont : public Continuation {
|
|||
public:
|
||||
ExcQuitCont() = default;
|
||||
~ExcQuitCont() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<ExcQuitCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
std::string type() const override;
|
||||
|
@ -229,8 +230,8 @@ class PushIntCont : public Continuation {
|
|||
PushIntCont(int val, Ref<Continuation> _next) : push_val(val), next(_next) {
|
||||
}
|
||||
~PushIntCont() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<PushIntCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
std::string type() const override;
|
||||
|
@ -245,8 +246,8 @@ class RepeatCont : public Continuation {
|
|||
: body(std::move(_body)), after(std::move(_after)), count(_count) {
|
||||
}
|
||||
~RepeatCont() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<RepeatCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
std::string type() const override;
|
||||
|
@ -259,8 +260,8 @@ class AgainCont : public Continuation {
|
|||
AgainCont(Ref<Continuation> _body) : body(std::move(_body)) {
|
||||
}
|
||||
~AgainCont() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<AgainCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
std::string type() const override;
|
||||
|
@ -273,8 +274,8 @@ class UntilCont : public Continuation {
|
|||
UntilCont(Ref<Continuation> _body, Ref<Continuation> _after) : body(std::move(_body)), after(std::move(_after)) {
|
||||
}
|
||||
~UntilCont() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<UntilCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
std::string type() const override;
|
||||
|
@ -289,8 +290,8 @@ class WhileCont : public Continuation {
|
|||
: cond(std::move(_cond)), body(std::move(_body)), after(std::move(_after)), chkcond(_chk) {
|
||||
}
|
||||
~WhileCont() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
bool serialize(CellBuilder& cb) const override;
|
||||
static Ref<WhileCont> deserialize(CellSlice& cs, int mode = 0);
|
||||
std::string type() const override;
|
||||
|
@ -312,8 +313,8 @@ class ArgContExt : public Continuation {
|
|||
ArgContExt(const ArgContExt&) = default;
|
||||
ArgContExt(ArgContExt&&) = default;
|
||||
~ArgContExt() override = default;
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
ControlData* get_cdata() override {
|
||||
return &data;
|
||||
}
|
||||
|
@ -354,8 +355,8 @@ class OrdCont : public Continuation {
|
|||
td::CntObject* make_copy() const override {
|
||||
return new OrdCont{*this};
|
||||
}
|
||||
int jump(VmState* st) const & override;
|
||||
int jump_w(VmState* st) & override;
|
||||
td::Ref<Continuation> jump(VmState* st, int& exitcode) const& override;
|
||||
td::Ref<Continuation> jump_w(VmState* st, int& exitcode) & override;
|
||||
|
||||
ControlData* get_cdata() override {
|
||||
return &data;
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
#pragma once
|
||||
|
||||
#include "td/utils/Slice.h"
|
||||
|
||||
#include "td/utils/HashSet.h"
|
||||
#include <set>
|
||||
|
||||
namespace vm {
|
||||
|
@ -43,7 +43,7 @@ class CellHashTable {
|
|||
template <class F>
|
||||
void for_each(F &&f) {
|
||||
for (auto &info : set_) {
|
||||
f(info);
|
||||
f(const_cast<InfoT &>(info));
|
||||
}
|
||||
}
|
||||
template <class F>
|
||||
|
@ -73,6 +73,6 @@ class CellHashTable {
|
|||
}
|
||||
|
||||
private:
|
||||
std::set<InfoT, std::less<>> set_;
|
||||
td::NodeHashSet<InfoT, typename InfoT::Hash, typename InfoT::Eq> set_;
|
||||
};
|
||||
} // namespace vm
|
||||
|
|
|
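Since the backing container is now a td::NodeHashSet keyed by InfoT::Hash and InfoT::Eq, every InfoT stored in a CellHashTable must provide transparent hash and equality functors so entries can be probed by a raw hash slice as well as by another InfoT (the CellInfo structs later in this change do exactly that). A hedged sketch of the minimal required shape; everything except key() and the nested Hash/Eq functors is illustrative.

struct MyCellInfo {
  td::Ref<vm::Cell> cell;
  vm::Cell::Hash key() const {
    return cell->get_hash();
  }
  struct Eq {
    using is_transparent = void;
    bool operator()(const MyCellInfo &a, const MyCellInfo &b) const { return a.key() == b.key(); }
    bool operator()(const MyCellInfo &a, td::Slice hash) const { return a.key().as_slice() == hash; }
    bool operator()(td::Slice hash, const MyCellInfo &a) const { return a.key().as_slice() == hash; }
  };
  struct Hash {
    using is_transparent = void;
    using transparent_key_equal = Eq;
    size_t operator()(td::Slice hash) const { return cell_hash_slice_hash(hash); }
    size_t operator()(const MyCellInfo &a) const { return cell_hash_slice_hash(a.key().as_slice()); }
  };
};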
@ -33,6 +33,7 @@ class RefcntCellStorer {
|
|||
|
||||
template <class StorerT>
|
||||
void store(StorerT &storer) const {
|
||||
TD_PERF_COUNTER(cell_store);
|
||||
using td::store;
|
||||
if (as_boc_) {
|
||||
td::int32 tag = -1;
|
||||
|
@ -151,18 +152,27 @@ CellLoader::CellLoader(std::shared_ptr<KeyValueReader> reader, std::function<voi
|
|||
|
||||
td::Result<CellLoader::LoadResult> CellLoader::load(td::Slice hash, bool need_data, ExtCellCreator &ext_cell_creator) {
|
||||
//LOG(ERROR) << "Storage: load cell " << hash.size() << " " << td::base64_encode(hash);
|
||||
LoadResult res;
|
||||
TD_PERF_COUNTER(cell_load);
|
||||
std::string serialized;
|
||||
TRY_RESULT(get_status, reader_->get(hash, serialized));
|
||||
if (get_status != KeyValue::GetStatus::Ok) {
|
||||
DCHECK(get_status == KeyValue::GetStatus::NotFound);
|
||||
return res;
|
||||
return LoadResult{};
|
||||
}
|
||||
TRY_RESULT(res, load(hash, serialized, need_data, ext_cell_creator));
|
||||
if (on_load_callback_) {
|
||||
on_load_callback_(res);
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
td::Result<CellLoader::LoadResult> CellLoader::load(td::Slice hash, td::Slice value, bool need_data,
|
||||
ExtCellCreator &ext_cell_creator) {
|
||||
LoadResult res;
|
||||
res.status = LoadResult::Ok;
|
||||
|
||||
RefcntCellParser refcnt_cell(need_data);
|
||||
td::TlParser parser(serialized);
|
||||
td::TlParser parser(value);
|
||||
refcnt_cell.parse(parser, ext_cell_creator);
|
||||
TRY_STATUS(parser.get_status());
|
||||
|
||||
|
@ -170,13 +180,28 @@ td::Result<CellLoader::LoadResult> CellLoader::load(td::Slice hash, bool need_da
|
|||
res.cell_ = std::move(refcnt_cell.cell);
|
||||
res.stored_boc_ = refcnt_cell.stored_boc_;
|
||||
//CHECK(res.cell_->get_hash() == hash);
|
||||
if (on_load_callback_) {
|
||||
on_load_callback_(res);
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
td::Result<CellLoader::LoadResult> CellLoader::load_refcnt(td::Slice hash) {
|
||||
LoadResult res;
|
||||
std::string serialized;
|
||||
TRY_RESULT(get_status, reader_->get(hash, serialized));
|
||||
if (get_status != KeyValue::GetStatus::Ok) {
|
||||
DCHECK(get_status == KeyValue::GetStatus::NotFound);
|
||||
return res;
|
||||
}
|
||||
res.status = LoadResult::Ok;
|
||||
td::TlParser parser(serialized);
|
||||
td::parse(res.refcnt_, parser);
|
||||
if (res.refcnt_ == -1) {
|
||||
parse(res.refcnt_, parser);
|
||||
}
|
||||
TRY_STATUS(parser.get_status());
|
||||
return res;
|
||||
}
|
||||
|
||||
CellStorer::CellStorer(KeyValue &kv) : kv_(kv) {
|
||||
}
|
||||
|
||||
|
@ -184,7 +209,11 @@ td::Status CellStorer::erase(td::Slice hash) {
|
|||
return kv_.erase(hash);
|
||||
}
|
||||
|
||||
std::string CellStorer::serialize_value(td::int32 refcnt, const td::Ref<DataCell> &cell, bool as_boc) {
|
||||
return td::serialize(RefcntCellStorer(refcnt, cell, as_boc));
|
||||
}
|
||||
|
||||
td::Status CellStorer::set(td::int32 refcnt, const td::Ref<DataCell> &cell, bool as_boc) {
|
||||
return kv_.set(cell->get_hash().as_slice(), td::serialize(RefcntCellStorer(refcnt, cell, as_boc)));
|
||||
return kv_.set(cell->get_hash().as_slice(), serialize_value(refcnt, cell, as_boc));
|
||||
}
|
||||
} // namespace vm
|
||||
|
|
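Two new entry points decouple (de)serialization of cell records from the key-value store itself: CellStorer::serialize_value() produces the stored bytes without writing them, and the static CellLoader::load(hash, value, ...) overload parses a value that was fetched elsewhere (a snapshot, a secondary instance, or a write batch). A hedged sketch of both paths is below; the std::map batch and the helper names are stand-ins for whatever the caller actually uses.

// Stage a cell write into an external batch instead of writing through CellStorer::set().
void queue_cell_write(std::map<std::string, std::string> &batch, td::int32 refcnt,
                      const td::Ref<vm::DataCell> &cell, bool as_boc) {
  batch[cell->get_hash().as_slice().str()] = vm::CellStorer::serialize_value(refcnt, cell, as_boc);
}

// Parse a record whose value was fetched out of band; the caller then inspects
// result.status and takes the cell/refcnt as usual.
td::Result<vm::CellLoader::LoadResult> parse_snapshot_value(td::Slice hash, td::Slice value,
                                                            vm::ExtCellCreator &creator) {
  return vm::CellLoader::load(hash, value, /*need_data=*/true, creator);
}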
|
@ -49,6 +49,8 @@ class CellLoader {
|
|||
};
|
||||
CellLoader(std::shared_ptr<KeyValueReader> reader, std::function<void(const LoadResult &)> on_load_callback = {});
|
||||
td::Result<LoadResult> load(td::Slice hash, bool need_data, ExtCellCreator &ext_cell_creator);
|
||||
static td::Result<LoadResult> load(td::Slice hash, td::Slice value, bool need_data, ExtCellCreator &ext_cell_creator);
|
||||
td::Result<LoadResult> load_refcnt(td::Slice hash); // This only loads refcnt_, cell_ == null
|
||||
|
||||
private:
|
||||
std::shared_ptr<KeyValueReader> reader_;
|
||||
|
@ -60,6 +62,7 @@ class CellStorer {
|
|||
CellStorer(KeyValue &kv);
|
||||
td::Status erase(td::Slice hash);
|
||||
td::Status set(td::int32 refcnt, const td::Ref<DataCell> &cell, bool as_boc);
|
||||
static std::string serialize_value(td::int32 refcnt, const td::Ref<DataCell> &cell, bool as_boc);
|
||||
|
||||
private:
|
||||
KeyValue &kv_;
|
||||
|
|
|
@ -27,6 +27,9 @@
|
|||
#include "td/utils/ThreadSafeCounter.h"
|
||||
|
||||
#include "vm/cellslice.h"
|
||||
#include <queue>
|
||||
#include "td/actor/actor.h"
|
||||
#include "common/delay.h"
|
||||
|
||||
namespace vm {
|
||||
namespace {
|
||||
|
@ -60,6 +63,20 @@ struct CellInfo {
|
|||
bool operator<(const CellInfo &other) const {
|
||||
return key() < other.key();
|
||||
}
|
||||
|
||||
struct Eq {
|
||||
using is_transparent = void; // Pred to use
|
||||
bool operator()(const CellInfo &info, const CellInfo &other_info) const { return info.key() == other_info.key();}
|
||||
bool operator()(const CellInfo &info, td::Slice hash) const { return info.key().as_slice() == hash;}
|
||||
bool operator()(td::Slice hash, const CellInfo &info) const { return info.key().as_slice() == hash;}
|
||||
|
||||
};
|
||||
struct Hash {
|
||||
using is_transparent = void; // Pred to use
|
||||
using transparent_key_equal = Eq;
|
||||
size_t operator()(td::Slice hash) const { return cell_hash_slice_hash(hash); }
|
||||
size_t operator()(const CellInfo &info) const { return cell_hash_slice_hash(info.key().as_slice());}
|
||||
};
|
||||
};
|
||||
|
||||
bool operator<(const CellInfo &a, td::Slice b) {
|
||||
|
@ -86,6 +103,12 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat
|
|||
TRY_RESULT(loaded_cell, get_cell_info_force(hash).cell->load_cell());
|
||||
return std::move(loaded_cell.data_cell);
|
||||
}
|
||||
td::Result<Ref<DataCell>> load_root(td::Slice hash) override {
|
||||
return load_cell(hash);
|
||||
}
|
||||
td::Result<Ref<DataCell>> load_root_thread_safe(td::Slice hash) const override {
|
||||
return td::Status::Error("Not implemented");
|
||||
}
|
||||
void load_cell_async(td::Slice hash, std::shared_ptr<AsyncExecutor> executor,
|
||||
td::Promise<Ref<DataCell>> promise) override {
|
||||
auto info = hash_table_.get_if_exists(hash);
|
||||
|
@ -160,6 +183,9 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat
|
|||
}
|
||||
|
||||
td::Status prepare_commit() override {
|
||||
if (pca_state_) {
|
||||
return td::Status::Error("prepare_commit_async is not finished");
|
||||
}
|
||||
if (is_prepared_for_commit()) {
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
@ -565,6 +591,221 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat
|
|||
DynamicBocExtCellExtra{cell_db_reader_}));
|
||||
return std::move(res);
|
||||
}
|
||||
|
||||
struct PrepareCommitAsyncState {
|
||||
size_t remaining_ = 0;
|
||||
std::shared_ptr<AsyncExecutor> executor_;
|
||||
td::Promise<td::Unit> promise_;
|
||||
|
||||
struct CellInfo2 {
|
||||
CellInfo *info{};
|
||||
std::vector<CellInfo2 *> parents;
|
||||
unsigned remaining_children = 0;
|
||||
Cell::Hash key() const {
|
||||
return info->key();
|
||||
}
|
||||
bool operator<(const CellInfo2 &other) const {
|
||||
return key() < other.key();
|
||||
}
|
||||
|
||||
friend bool operator<(const CellInfo2 &a, td::Slice b) {
|
||||
return a.key().as_slice() < b;
|
||||
}
|
||||
|
||||
friend bool operator<(td::Slice a, const CellInfo2 &b) {
|
||||
return a < b.key().as_slice();
|
||||
}
|
||||
|
||||
struct Eq {
|
||||
using is_transparent = void; // Pred to use
|
||||
bool operator()(const CellInfo2 &info, const CellInfo2 &other_info) const {
|
||||
return info.key() == other_info.key();
|
||||
}
|
||||
bool operator()(const CellInfo2 &info, td::Slice hash) const {
|
||||
return info.key().as_slice() == hash;
|
||||
}
|
||||
bool operator()(td::Slice hash, const CellInfo2 &info) const {
|
||||
return info.key().as_slice() == hash;
|
||||
}
|
||||
};
|
||||
struct Hash {
|
||||
using is_transparent = void; // Pred to use
|
||||
using transparent_key_equal = Eq;
|
||||
size_t operator()(td::Slice hash) const {
|
||||
return cell_hash_slice_hash(hash);
|
||||
}
|
||||
size_t operator()(const CellInfo2 &info) const {
|
||||
return cell_hash_slice_hash(info.key().as_slice());
|
||||
}
|
||||
};
|
||||
};
|
||||
|
||||
CellHashTable<CellInfo2> cells_;
|
||||
|
||||
std::queue<CellInfo2*> load_queue_;
|
||||
td::uint32 active_load_ = 0;
|
||||
td::uint32 max_parallel_load_ = 4;
|
||||
};
|
||||
std::unique_ptr<PrepareCommitAsyncState> pca_state_;
|
||||
|
||||
void prepare_commit_async(std::shared_ptr<AsyncExecutor> executor, td::Promise<td::Unit> promise) override {
|
||||
hash_table_ = {};
|
||||
if (pca_state_) {
|
||||
promise.set_error(td::Status::Error("Other prepare_commit_async is not finished"));
|
||||
return;
|
||||
}
|
||||
if (is_prepared_for_commit()) {
|
||||
promise.set_result(td::Unit());
|
||||
return;
|
||||
}
|
||||
pca_state_ = std::make_unique<PrepareCommitAsyncState>();
|
||||
pca_state_->executor_ = std::move(executor);
|
||||
pca_state_->promise_ = std::move(promise);
|
||||
for (auto &new_cell : to_inc_) {
|
||||
dfs_new_cells_in_db_async(new_cell);
|
||||
}
|
||||
pca_state_->cells_.for_each([&](PrepareCommitAsyncState::CellInfo2 &info) {
|
||||
++pca_state_->remaining_;
|
||||
if (info.remaining_children == 0) {
|
||||
pca_load_from_db(&info);
|
||||
}
|
||||
});
|
||||
if (pca_state_->remaining_ == 0) {
|
||||
prepare_commit_async_cont();
|
||||
}
|
||||
}
|
||||
|
||||
void dfs_new_cells_in_db_async(const td::Ref<vm::Cell> &cell, PrepareCommitAsyncState::CellInfo2 *parent = nullptr) {
|
||||
bool exists = true;
|
||||
pca_state_->cells_.apply(cell->get_hash().as_slice(), [&](PrepareCommitAsyncState::CellInfo2 &info) {
|
||||
if (info.info == nullptr) {
|
||||
exists = false;
|
||||
info.info = &get_cell_info(cell);
|
||||
}
|
||||
});
|
||||
auto info = pca_state_->cells_.get_if_exists(cell->get_hash().as_slice());
|
||||
if (parent) {
|
||||
info->parents.push_back(parent);
|
||||
++parent->remaining_children;
|
||||
}
|
||||
if (exists) {
|
||||
return;
|
||||
}
|
||||
if (cell->is_loaded()) {
|
||||
vm::CellSlice cs(vm::NoVm{}, cell);
|
||||
for (unsigned i = 0; i < cs.size_refs(); i++) {
|
||||
dfs_new_cells_in_db_async(cs.prefetch_ref(i), info);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void pca_load_from_db(PrepareCommitAsyncState::CellInfo2 *info) {
|
||||
if (pca_state_->active_load_ >= pca_state_->max_parallel_load_) {
|
||||
pca_state_->load_queue_.push(info);
|
||||
return;
|
||||
}
|
||||
++pca_state_->active_load_;
|
||||
pca_state_->executor_->execute_async(
|
||||
[db = this, info, executor = pca_state_->executor_, loader = *loader_]() mutable {
|
||||
auto res = loader.load_refcnt(info->info->cell->get_hash().as_slice()).move_as_ok();
|
||||
executor->execute_sync([db, info, res = std::move(res)]() {
|
||||
--db->pca_state_->active_load_;
|
||||
db->pca_process_load_queue();
|
||||
db->pca_set_in_db(info, std::move(res));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
void pca_process_load_queue() {
|
||||
while (pca_state_->active_load_ < pca_state_->max_parallel_load_ && !pca_state_->load_queue_.empty()) {
|
||||
PrepareCommitAsyncState::CellInfo2 *info = pca_state_->load_queue_.front();
|
||||
pca_state_->load_queue_.pop();
|
||||
pca_load_from_db(info);
|
||||
}
|
||||
}
|
||||
|
||||
void pca_set_in_db(PrepareCommitAsyncState::CellInfo2 *info, CellLoader::LoadResult result) {
|
||||
info->info->sync_with_db = true;
|
||||
if (result.status == CellLoader::LoadResult::Ok) {
|
||||
info->info->in_db = true;
|
||||
info->info->db_refcnt = result.refcnt();
|
||||
} else {
|
||||
info->info->in_db = false;
|
||||
}
|
||||
for (PrepareCommitAsyncState::CellInfo2 *parent_info : info->parents) {
|
||||
if (parent_info->info->sync_with_db) {
|
||||
continue;
|
||||
}
|
||||
if (!info->info->in_db) {
|
||||
pca_set_in_db(parent_info, {});
|
||||
} else if (--parent_info->remaining_children == 0) {
|
||||
pca_load_from_db(parent_info);
|
||||
}
|
||||
}
|
||||
CHECK(pca_state_->remaining_ != 0);
|
||||
if (--pca_state_->remaining_ == 0) {
|
||||
prepare_commit_async_cont();
|
||||
}
|
||||
}
|
||||
|
||||
void prepare_commit_async_cont() {
|
||||
for (auto &new_cell : to_inc_) {
|
||||
auto &new_cell_info = get_cell_info(new_cell);
|
||||
dfs_new_cells(new_cell_info);
|
||||
}
|
||||
|
||||
CHECK(pca_state_->remaining_ == 0);
|
||||
for (auto &old_cell : to_dec_) {
|
||||
auto &old_cell_info = get_cell_info(old_cell);
|
||||
dfs_old_cells_async(old_cell_info);
|
||||
}
|
||||
if (pca_state_->remaining_ == 0) {
|
||||
prepare_commit_async_cont2();
|
||||
}
|
||||
}
|
||||
|
||||
void dfs_old_cells_async(CellInfo &info) {
|
||||
if (!info.was) {
|
||||
info.was = true;
|
||||
visited_.push_back(&info);
|
||||
if (!info.sync_with_db) {
|
||||
++pca_state_->remaining_;
|
||||
load_cell_async(
|
||||
info.cell->get_hash().as_slice(), pca_state_->executor_,
|
||||
[executor = pca_state_->executor_, db = this, info = &info](td::Result<td::Ref<vm::DataCell>> R) {
|
||||
R.ensure();
|
||||
executor->execute_sync([db, info]() {
|
||||
CHECK(info->sync_with_db);
|
||||
db->dfs_old_cells_async(*info);
|
||||
if (--db->pca_state_->remaining_ == 0) {
|
||||
db->prepare_commit_async_cont2();
|
||||
}
|
||||
});
|
||||
});
|
||||
return;
|
||||
}
|
||||
}
|
||||
info.refcnt_diff--;
|
||||
if (!info.sync_with_db) {
|
||||
return;
|
||||
}
|
||||
auto new_refcnt = info.refcnt_diff + info.db_refcnt;
|
||||
CHECK(new_refcnt >= 0);
|
||||
if (new_refcnt != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
for_each(info, [this](auto &child_info) { dfs_old_cells_async(child_info); });
|
||||
}
|
||||
|
||||
void prepare_commit_async_cont2() {
|
||||
save_diff_prepare();
|
||||
to_inc_.clear();
|
||||
to_dec_.clear();
|
||||
pca_state_->promise_.set_result(td::Unit());
|
||||
pca_state_ = {};
|
||||
}
|
||||
|
||||
};
|
||||
} // namespace
|
||||
|
||||
|
|
|
@@ -23,6 +23,11 @@
#include "td/utils/Status.h"
#include "td/actor/PromiseFuture.h"

#include <thread>

namespace td {
class KeyValueReader;
}
namespace vm {
class CellLoader;
class CellStorer;
@@ -45,12 +50,20 @@ class DynamicBagOfCellsDb {
 public:
  virtual ~DynamicBagOfCellsDb() = default;
  virtual td::Result<Ref<DataCell>> load_cell(td::Slice hash) = 0;
  virtual td::Result<Ref<DataCell>> load_root(td::Slice hash) = 0;
  virtual td::Result<Ref<DataCell>> load_root_thread_safe(td::Slice hash) const = 0;
  struct Stats {
    td::int64 roots_total_count{0};
    td::int64 cells_total_count{0};
    td::int64 cells_total_size{0};
    void apply_diff(Stats diff) {
    std::vector<std::pair<std::string, std::string>> custom_stats;
    void apply_diff(const Stats &diff) {
      roots_total_count += diff.roots_total_count;
      cells_total_count += diff.cells_total_count;
      cells_total_size += diff.cells_total_size;
      CHECK(roots_total_count >= 0);
      CHECK(cells_total_count >= 0);
      CHECK(cells_total_size >= 0);
    }
  };
  virtual void inc(const Ref<Cell> &old_root) = 0;
@@ -58,6 +71,9 @@ class DynamicBagOfCellsDb {

  virtual td::Status prepare_commit() = 0;
  virtual Stats get_stats_diff() = 0;
  virtual td::Result<Stats> get_stats() {
    return td::Status::Error("Not implemented");
  }
  virtual td::Status commit(CellStorer &) = 0;
  virtual std::shared_ptr<CellDbReader> get_cell_db_reader() = 0;

@@ -65,19 +81,31 @@ class DynamicBagOfCellsDb {
  virtual td::Status set_loader(std::unique_ptr<CellLoader> loader) = 0;

  virtual void set_celldb_compress_depth(td::uint32 value) = 0;
  virtual vm::ExtCellCreator& as_ext_cell_creator() = 0;
  virtual vm::ExtCellCreator &as_ext_cell_creator() = 0;

  static std::unique_ptr<DynamicBagOfCellsDb> create();

  struct CreateInMemoryOptions {
    size_t extra_threads{std::thread::hardware_concurrency()};
    bool verbose{true};
    // Allocated DataCels will never be deleted
    bool use_arena{false};
    // Almost no overhead in memory during creation, but will scan database twice
    bool use_less_memory_during_creation{true};
  };
  static std::unique_ptr<DynamicBagOfCellsDb> create_in_memory(td::KeyValueReader *kv, CreateInMemoryOptions options);

  class AsyncExecutor {
   public:
    virtual ~AsyncExecutor() {}
    virtual ~AsyncExecutor() {
    }
    virtual void execute_async(std::function<void()> f) = 0;
    virtual void execute_sync(std::function<void()> f) = 0;
  };

  virtual void load_cell_async(td::Slice hash, std::shared_ptr<AsyncExecutor> executor,
                               td::Promise<Ref<DataCell>> promise) = 0;
  virtual void prepare_commit_async(std::shared_ptr<AsyncExecutor> executor, td::Promise<td::Unit> promise) = 0;
};

} // namespace vm
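The methods above are the public surface of the new asynchronous and in-memory paths: callers register roots with inc()/dec(), drive prepare_commit_async() through an AsyncExecutor they supply, then call commit(). A minimal usage sketch follows; it is not part of the diff, the executor here runs callbacks inline, and InlineExecutor, commit_new_root and kv_reader are hypothetical names used only for illustration.

class InlineExecutor : public vm::DynamicBagOfCellsDb::AsyncExecutor {
 public:
  // A production executor would offload execute_async to a thread pool or actor scheduler.
  void execute_async(std::function<void()> f) override { f(); }
  void execute_sync(std::function<void()> f) override { f(); }
};

// Register a new root, prepare asynchronously, then persist the diff.
void commit_new_root(vm::DynamicBagOfCellsDb &boc_db, vm::CellStorer &storer, td::Ref<vm::Cell> new_root) {
  boc_db.inc(new_root);
  boc_db.prepare_commit_async(std::make_shared<InlineExecutor>(), [&](td::Result<td::Unit> res) {
    res.ensure();
    boc_db.commit(storer).ensure();  // capturing by reference is safe here only because the executor is inline
  });
}

// The in-memory variant scans the whole snapshot behind kv_reader (a td::KeyValueReader*) at creation time.
vm::DynamicBagOfCellsDb::CreateInMemoryOptions in_memory_options() {
  vm::DynamicBagOfCellsDb::CreateInMemoryOptions options;
  options.extra_threads = 4;                       // parallel scan of the key-value snapshot
  options.use_less_memory_during_creation = true;  // two passes over the database, lower peak memory
  return options;
}
// auto db = vm::DynamicBagOfCellsDb::create_in_memory(kv_reader, in_memory_options());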
988 crypto/vm/db/InMemoryBagOfCellsDb.cpp Normal file
@@ -0,0 +1,988 @@
|
|||
#include "CellStorage.h"
|
||||
#include "DynamicBagOfCellsDb.h"
|
||||
#include "td/utils/Timer.h"
|
||||
#include "td/utils/base64.h"
|
||||
#include "td/utils/format.h"
|
||||
#include "td/utils/int_types.h"
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/port/Stat.h"
|
||||
#include "vm/cells/CellHash.h"
|
||||
#include "vm/cells/CellSlice.h"
|
||||
#include "vm/cells/DataCell.h"
|
||||
#include "vm/cells/ExtCell.h"
|
||||
|
||||
#include "td/utils/HashMap.h"
|
||||
#include "td/utils/HashSet.h"
|
||||
|
||||
#include <optional>
|
||||
|
||||
#if TD_PORT_POSIX
|
||||
#include <sys/mman.h>
|
||||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
namespace vm {
|
||||
namespace {
|
||||
constexpr bool use_dense_hash_map = true;
|
||||
|
||||
template <class F>
|
||||
void parallel_run(size_t n, F &&run_task, size_t extra_threads_n) {
|
||||
std::atomic<size_t> next_task_id{0};
|
||||
auto loop = [&] {
|
||||
while (true) {
|
||||
auto task_id = next_task_id++;
|
||||
if (task_id >= n) {
|
||||
break;
|
||||
}
|
||||
run_task(task_id);
|
||||
}
|
||||
};
|
||||
|
||||
// NB: it could be important that td::thread is used, not std::thread
|
||||
std::vector<td::thread> threads;
|
||||
for (size_t i = 0; i < extra_threads_n; i++) {
|
||||
threads.emplace_back(loop);
|
||||
}
|
||||
loop();
|
||||
for (auto &thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
threads.clear();
|
||||
}
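// Illustrative usage sketch, not part of the diff: parallel_run hands out task
// ids 0..n-1 through a shared atomic counter and runs them on the calling
// thread plus extra_threads_n td::thread workers; each task below writes only
// its own slot, so no extra synchronization is needed.
[[maybe_unused]] static void parallel_run_usage_example() {
  std::vector<td::int64> per_task_result(16);
  parallel_run(
      16, [&](size_t task_id) { per_task_result[task_id] = static_cast<td::int64>(task_id) * 2; },
      /*extra_threads_n=*/3);
}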
|
||||
|
||||
struct UniqueAccess {
|
||||
struct Release {
|
||||
void operator()(UniqueAccess *access) const {
|
||||
if (access) {
|
||||
access->release();
|
||||
}
|
||||
}
|
||||
};
|
||||
using Lock = std::unique_ptr<UniqueAccess, Release>;
|
||||
Lock lock() {
|
||||
CHECK(!locked_.exchange(true));
|
||||
return Lock(this);
|
||||
}
|
||||
|
||||
private:
|
||||
std::atomic<bool> locked_{false};
|
||||
void release() {
|
||||
locked_ = false;
|
||||
}
|
||||
};
|
||||
class DefaultPrunnedCellCreator : public ExtCellCreator {
|
||||
public:
|
||||
td::Result<Ref<Cell>> ext_cell(Cell::LevelMask level_mask, td::Slice hash, td::Slice depth) override {
|
||||
TRY_RESULT(cell, PrunnedCell<td::Unit>::create(PrunnedCellInfo{level_mask, hash, depth}, td::Unit{}));
|
||||
return cell;
|
||||
}
|
||||
};
|
||||
|
||||
class ArenaPrunnedCellCreator : public ExtCellCreator {
|
||||
struct ArenaAllocator {
|
||||
ArenaAllocator() {
|
||||
// only one instance ever
|
||||
static UniqueAccess unique_access;
|
||||
[[maybe_unused]] auto ptr = unique_access.lock().release();
|
||||
}
|
||||
std::mutex mutex;
|
||||
struct Deleter {
|
||||
static constexpr size_t batch_size = 1 << 24;
|
||||
#if TD_PORT_POSIX
|
||||
static std::unique_ptr<char, Deleter> alloc() {
|
||||
char *ptr = reinterpret_cast<char *>(
|
||||
mmap(NULL, batch_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
|
||||
CHECK(ptr != nullptr);
|
||||
return std::unique_ptr<char, Deleter>(ptr);
|
||||
}
|
||||
void operator()(char *ptr) const {
|
||||
munmap(ptr, batch_size);
|
||||
}
|
||||
#else
|
||||
static std::unique_ptr<char, Deleter> alloc() {
|
||||
auto ptr = reinterpret_cast<char *>(malloc(batch_size));
|
||||
CHECK(ptr != nullptr);
|
||||
return std::unique_ptr<char, Deleter>(ptr);
|
||||
}
|
||||
void operator()(char *ptr) const {
|
||||
free(ptr);
|
||||
}
|
||||
#endif
|
||||
};
|
||||
std::vector<std::unique_ptr<char, Deleter>> arena;
|
||||
td::uint64 arena_generation{0};
|
||||
|
||||
td::MutableSlice alloc_batch() {
|
||||
auto batch = Deleter::alloc();
|
||||
auto res = td::MutableSlice(batch.get(), Deleter::batch_size);
|
||||
std::lock_guard<std::mutex> guard(mutex);
|
||||
arena.emplace_back(std::move(batch));
|
||||
return res;
|
||||
}
|
||||
|
||||
char *alloc(size_t size) {
|
||||
thread_local td::MutableSlice batch;
|
||||
thread_local td::uint64 batch_generation{0};
|
||||
auto aligned_size = (size + 7) / 8 * 8;
|
||||
if (batch.size() < size || batch_generation != arena_generation) {
|
||||
batch = alloc_batch();
|
||||
batch_generation = arena_generation;
|
||||
}
|
||||
auto res = batch.begin();
|
||||
batch.remove_prefix(aligned_size);
|
||||
return res;
|
||||
}
|
||||
void clear() {
|
||||
std::lock_guard<std::mutex> guard(mutex);
|
||||
arena_generation++;
|
||||
td::reset_to_empty(arena);
|
||||
}
|
||||
};
|
||||
static ArenaAllocator arena_;
|
||||
static td::ThreadSafeCounter cells_count_;
|
||||
|
||||
public:
|
||||
struct Counter {
|
||||
Counter() {
|
||||
cells_count_.add(1);
|
||||
}
|
||||
Counter(Counter &&other) {
|
||||
cells_count_.add(1);
|
||||
}
|
||||
Counter(const Counter &other) {
|
||||
cells_count_.add(1);
|
||||
}
|
||||
~Counter() {
|
||||
cells_count_.add(-1);
|
||||
}
|
||||
};
|
||||
|
||||
struct Allocator {
|
||||
template <class T, class... ArgsT>
|
||||
std::unique_ptr<PrunnedCell<Counter>> make_unique(ArgsT &&...args) {
|
||||
auto *ptr = arena_.alloc(sizeof(T));
|
||||
T *obj = new (ptr) T(std::forward<ArgsT>(args)...);
|
||||
return std::unique_ptr<T>(obj);
|
||||
}
|
||||
};
|
||||
td::Result<Ref<Cell>> ext_cell(Cell::LevelMask level_mask, td::Slice hash, td::Slice depth) override {
|
||||
Allocator allocator;
|
||||
TRY_RESULT(cell, PrunnedCell<Counter>::create(allocator, PrunnedCellInfo{level_mask, hash, depth}, Counter()));
|
||||
return cell;
|
||||
}
|
||||
static td::int64 count() {
|
||||
return cells_count_.sum();
|
||||
}
|
||||
static void clear_arena() {
|
||||
LOG_CHECK(cells_count_.sum() == 0) << cells_count_.sum();
|
||||
arena_.clear();
|
||||
}
|
||||
};
|
||||
td::ThreadSafeCounter ArenaPrunnedCellCreator::cells_count_;
|
||||
ArenaPrunnedCellCreator::ArenaAllocator ArenaPrunnedCellCreator::arena_;
|
||||
|
||||
struct CellInfo {
|
||||
mutable td::int32 db_refcnt{0};
|
||||
Ref<DataCell> cell;
|
||||
};
|
||||
static_assert(sizeof(CellInfo) == 16);
|
||||
|
||||
CellHash as_cell_hash(const CellInfo &info) {
|
||||
return info.cell->get_hash();
|
||||
}
|
||||
|
||||
struct CellInfoHashTableBaseline {
|
||||
td::HashSet<CellInfo, CellHashF, CellEqF> ht_;
|
||||
const CellInfo *find(CellHash hash) const {
|
||||
if (auto it = ht_.find(hash); it != ht_.end()) {
|
||||
return &*it;
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
void erase(CellHash hash) {
|
||||
auto it = ht_.find(hash);
|
||||
CHECK(it != ht_.end());
|
||||
ht_.erase(it);
|
||||
}
|
||||
void insert(CellInfo info) {
|
||||
ht_.insert(std::move(info));
|
||||
}
|
||||
template <class Iterator>
|
||||
void init_from(Iterator begin, Iterator end) {
|
||||
ht_ = td::HashSet<CellInfo, CellHashF, CellEqF>(begin, end);
|
||||
}
|
||||
size_t size() const {
|
||||
return ht_.size();
|
||||
}
|
||||
auto begin() const {
|
||||
return ht_.begin();
|
||||
}
|
||||
auto end() const {
|
||||
return ht_.end();
|
||||
}
|
||||
size_t bucket_count() const {
|
||||
return ht_.bucket_count();
|
||||
}
|
||||
template <class F>
|
||||
auto for_each(F &&f) {
|
||||
for (auto &it : ht_) {
|
||||
f(it);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
struct CellInfoHashTableDense {
|
||||
size_t dense_ht_size_{0};
|
||||
size_t dense_ht_buckets_{1};
|
||||
std::vector<size_t> dense_ht_offsets_{1};
|
||||
std::vector<CellInfo> dense_ht_values_;
|
||||
td::HashSet<CellInfo, CellHashF, CellEqF> new_ht_;
|
||||
size_t dense_choose_bucket(const CellHash &hash) const {
|
||||
return cell_hash_slice_hash(hash.as_slice()) % dense_ht_buckets_;
|
||||
}
|
||||
const CellInfo *dense_find(CellHash hash) const {
|
||||
auto bucket_i = dense_choose_bucket(hash);
|
||||
auto begin = dense_ht_values_.begin() + dense_ht_offsets_[bucket_i];
|
||||
auto end = dense_ht_values_.begin() + dense_ht_offsets_[bucket_i + 1];
|
||||
for (auto it = begin; it != end; ++it) {
|
||||
if (it->cell.not_null() && it->cell->get_hash() == hash) {
|
||||
return &*it;
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
CellInfo *dense_find_empty(CellHash hash) {
|
||||
auto bucket_i = dense_choose_bucket(hash);
|
||||
auto begin = dense_ht_values_.begin() + dense_ht_offsets_[bucket_i];
|
||||
auto end = dense_ht_values_.begin() + dense_ht_offsets_[bucket_i + 1];
|
||||
for (auto it = begin; it != end; ++it) {
|
||||
if (it->cell.is_null()) {
|
||||
return &*it;
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
const CellInfo *find(CellHash hash) const {
|
||||
if (auto it = new_ht_.find(hash); it != new_ht_.end()) {
|
||||
return &*it;
|
||||
}
|
||||
if (auto it = dense_find(hash)) {
|
||||
return it;
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
void erase(CellHash hash) {
|
||||
if (auto it = new_ht_.find(hash); it != new_ht_.end()) {
|
||||
new_ht_.erase(it);
|
||||
return;
|
||||
}
|
||||
auto info = dense_find(hash);
|
||||
CHECK(info && info->db_refcnt > 0);
|
||||
info->db_refcnt = 0;
|
||||
const_cast<CellInfo *>(info)->cell = {};
|
||||
CHECK(dense_ht_size_ > 0);
|
||||
dense_ht_size_--;
|
||||
}
|
||||
|
||||
void insert(CellInfo info) {
|
||||
if (auto dest = dense_find_empty(info.cell->get_hash())) {
|
||||
*dest = std::move(info);
|
||||
dense_ht_size_++;
|
||||
return;
|
||||
}
|
||||
new_ht_.insert(std::move(info));
|
||||
}
|
||||
template <class Iterator>
|
||||
void init_from(Iterator begin, Iterator end) {
|
||||
auto size = td::narrow_cast<size_t>(std::distance(begin, end));
|
||||
dense_ht_buckets_ = std::max(size_t(1), size_t(size / 8));
|
||||
|
||||
std::vector<size_t> offsets(dense_ht_buckets_ + 2);
|
||||
for (auto it = begin; it != end; ++it) {
|
||||
auto bucket_i = dense_choose_bucket(it->cell->get_hash());
|
||||
offsets[bucket_i + 2]++;
|
||||
}
|
||||
for (size_t i = 1; i < offsets.size(); i++) {
|
||||
offsets[i] += offsets[i - 1];
|
||||
}
|
||||
dense_ht_values_.resize(size);
|
||||
for (auto it = begin; it != end; ++it) {
|
||||
auto bucket_i = dense_choose_bucket(it->cell->get_hash());
|
||||
dense_ht_values_[offsets[bucket_i + 1]++] = std::move(*it);
|
||||
}
|
||||
CHECK(offsets[0] == 0);
|
||||
CHECK(offsets[offsets.size() - 1] == size);
|
||||
CHECK(offsets[offsets.size() - 2] == size);
|
||||
dense_ht_offsets_ = std::move(offsets);
|
||||
dense_ht_size_ = size;
|
||||
}
|
||||
size_t size() const {
|
||||
return dense_ht_size_ + new_ht_.size();
|
||||
}
|
||||
template <class F>
|
||||
auto for_each(F &&f) {
|
||||
for (auto &it : dense_ht_values_) {
|
||||
if (it.cell.not_null()) {
|
||||
f(it);
|
||||
}
|
||||
}
|
||||
for (auto &it : new_ht_) {
|
||||
f(it);
|
||||
}
|
||||
}
|
||||
size_t bucket_count() const {
|
||||
return new_ht_.bucket_count() + dense_ht_values_.size();
|
||||
}
|
||||
};
|
||||
|
||||
using CellInfoHashTable = std::conditional_t<use_dense_hash_map, CellInfoHashTableDense, CellInfoHashTableBaseline>;
|
||||
|
||||
class CellStorage {
|
||||
struct PrivateTag {};
|
||||
struct CellBucket;
|
||||
struct None {
|
||||
void operator()(CellBucket *bucket) {
|
||||
}
|
||||
};
|
||||
struct CellBucketRef {
|
||||
UniqueAccess::Lock lock;
|
||||
std::unique_ptr<CellBucket, None> bucket;
|
||||
CellBucket &operator*() {
|
||||
return *bucket;
|
||||
}
|
||||
CellBucket *operator->() {
|
||||
return bucket.get();
|
||||
}
|
||||
};
|
||||
struct CellBucket {
|
||||
mutable UniqueAccess access_;
|
||||
CellInfoHashTable infos_;
|
||||
std::vector<CellInfo> cells_;
|
||||
std::vector<Ref<DataCell>> roots_;
|
||||
size_t boc_count_{0};
|
||||
[[maybe_unused]] char pad3[TD_CONCURRENCY_PAD];
|
||||
|
||||
void clear() {
|
||||
td::reset_to_empty(infos_);
|
||||
td::reset_to_empty(cells_);
|
||||
td::reset_to_empty(roots_);
|
||||
}
|
||||
|
||||
CellBucketRef unique_access() const {
|
||||
auto lock = access_.lock();
|
||||
return CellBucketRef{.lock = std::move(lock),
|
||||
.bucket = std::unique_ptr<CellBucket, None>(const_cast<CellBucket *>(this))};
|
||||
}
|
||||
};
|
||||
std::array<CellBucket, 256> buckets_{};
|
||||
bool inited_{false};
|
||||
|
||||
const CellBucket &get_bucket(size_t i) const {
|
||||
return buckets_.at(i);
|
||||
}
|
||||
const CellBucket &get_bucket(const CellHash &hash) const {
|
||||
return get_bucket(hash.as_array()[0]);
|
||||
}
|
||||
|
||||
mutable UniqueAccess local_access_;
|
||||
td::HashSet<Ref<DataCell>, CellHashF, CellEqF> local_roots_;
|
||||
DynamicBagOfCellsDb::Stats stats_;
|
||||
|
||||
mutable std::mutex root_mutex_;
|
||||
td::HashSet<Ref<DataCell>, CellHashF, CellEqF> roots_;
|
||||
|
||||
public:
|
||||
std::optional<CellInfo> get_info(const CellHash &hash) const {
|
||||
auto lock = local_access_.lock();
|
||||
auto &bucket = get_bucket(hash);
|
||||
if (auto info_ptr = bucket.infos_.find(hash)) {
|
||||
return *info_ptr;
|
||||
}
|
||||
return {};
|
||||
}
|
||||
|
||||
DynamicBagOfCellsDb::Stats get_stats() {
|
||||
auto unique_access = local_access_.lock();
|
||||
auto stats = stats_;
|
||||
auto add_stat = [&stats](auto key, auto value) {
|
||||
stats.custom_stats.emplace_back(std::move(key), PSTRING() << value);
|
||||
};
|
||||
if constexpr (use_dense_hash_map) {
|
||||
size_t dense_ht_capacity = 0;
|
||||
size_t new_ht_capacity = 0;
|
||||
size_t dense_ht_size = 0;
|
||||
size_t new_ht_size = 0;
|
||||
for_each_bucket(0, [&](auto bucket_id, CellBucket &bucket) {
|
||||
dense_ht_capacity += bucket.infos_.dense_ht_values_.size();
|
||||
dense_ht_size += bucket.infos_.dense_ht_size_;
|
||||
new_ht_capacity += bucket.infos_.new_ht_.bucket_count();
|
||||
new_ht_size += bucket.infos_.new_ht_.size();
|
||||
});
|
||||
auto size = new_ht_size + dense_ht_size;
|
||||
auto capacity = new_ht_capacity + dense_ht_capacity;
|
||||
add_stat("ht.capacity", capacity);
|
||||
add_stat("ht.size", size);
|
||||
add_stat("ht.load", double(size) / std::max(1.0, double(capacity)));
|
||||
add_stat("ht.dense_ht_capacity", dense_ht_capacity);
|
||||
add_stat("ht.dense_ht_size", dense_ht_size);
|
||||
add_stat("ht.dense_ht_load", double(dense_ht_size) / std::max(1.0, double(dense_ht_capacity)));
|
||||
add_stat("ht.new_ht_capacity", new_ht_capacity);
|
||||
add_stat("ht.new_ht_size", new_ht_size);
|
||||
add_stat("ht.new_ht_load", double(new_ht_size) / std::max(1.0, double(new_ht_capacity)));
|
||||
} else {
|
||||
size_t capacity = 0;
|
||||
size_t size = 0;
|
||||
for_each_bucket(0, [&](auto bucket_id, CellBucket &bucket) {
|
||||
capacity += bucket.infos_.bucket_count();
|
||||
size += bucket.infos_.size();
|
||||
});
|
||||
add_stat("ht.capacity", capacity);
|
||||
add_stat("ht.size", size);
|
||||
add_stat("ht.load", double(size) / std::max(1.0, double(capacity)));
|
||||
}
|
||||
CHECK(td::narrow_cast<size_t>(stats.roots_total_count) == local_roots_.size());
|
||||
return stats;
|
||||
}
|
||||
void apply_stats_diff(DynamicBagOfCellsDb::Stats diff) {
|
||||
auto unique_access = local_access_.lock();
|
||||
stats_.apply_diff(diff);
|
||||
CHECK(td::narrow_cast<size_t>(stats_.roots_total_count) == local_roots_.size());
|
||||
size_t cells_count{0};
|
||||
for_each_bucket(0, [&](size_t bucket_id, auto &bucket) { cells_count += bucket.infos_.size(); });
|
||||
CHECK(td::narrow_cast<size_t>(stats_.cells_total_count) == cells_count);
|
||||
}
|
||||
|
||||
td::Result<Ref<DataCell>> load_cell(const CellHash &hash) const {
|
||||
auto lock = local_access_.lock();
|
||||
auto &bucket = get_bucket(hash);
|
||||
if (auto info_ptr = bucket.infos_.find(hash)) {
|
||||
return info_ptr->cell;
|
||||
}
|
||||
return td::Status::Error("not found");
|
||||
}
|
||||
|
||||
td::Result<Ref<DataCell>> load_root_local(const CellHash &hash) const {
|
||||
auto lock = local_access_.lock();
|
||||
if (auto it = local_roots_.find(hash); it != local_roots_.end()) {
|
||||
return *it;
|
||||
}
|
||||
return td::Status::Error("not found");
|
||||
}
|
||||
td::Result<Ref<DataCell>> load_root_shared(const CellHash &hash) const {
|
||||
std::lock_guard<std::mutex> lock(root_mutex_);
|
||||
if (auto it = roots_.find(hash); it != roots_.end()) {
|
||||
return *it;
|
||||
}
|
||||
return td::Status::Error("not found");
|
||||
}
|
||||
|
||||
void erase(const CellHash &hash) {
|
||||
auto lock = local_access_.lock();
|
||||
auto bucket = get_bucket(hash).unique_access();
|
||||
bucket->infos_.erase(hash);
|
||||
if (auto local_it = local_roots_.find(hash); local_it != local_roots_.end()) {
|
||||
local_roots_.erase(local_it);
|
||||
std::lock_guard<std::mutex> root_lock(root_mutex_);
|
||||
auto shared_it = roots_.find(hash);
|
||||
CHECK(shared_it != roots_.end());
|
||||
roots_.erase(shared_it);
|
||||
CHECK(stats_.roots_total_count > 0);
|
||||
stats_.roots_total_count--;
|
||||
}
|
||||
}
|
||||
|
||||
void add_new_root(Ref<DataCell> cell) {
|
||||
auto lock = local_access_.lock();
|
||||
if (local_roots_.insert(cell).second) {
|
||||
std::lock_guard<std::mutex> lock(root_mutex_);
|
||||
roots_.insert(std::move(cell));
|
||||
stats_.roots_total_count++;
|
||||
}
|
||||
}
|
||||
|
||||
void set(td::int32 refcnt, Ref<DataCell> cell) {
|
||||
auto lock = local_access_.lock();
|
||||
//LOG(ERROR) << "setting refcnt to " << refcnt << ", cell " << td::base64_encode(cell->get_hash().as_slice());
|
||||
auto hash = cell->get_hash();
|
||||
auto bucket = get_bucket(hash).unique_access();
|
||||
if (auto info_ptr = bucket->infos_.find(hash)) {
|
||||
CHECK(info_ptr->cell.get() == cell.get());
|
||||
info_ptr->db_refcnt = refcnt;
|
||||
} else {
|
||||
bucket->infos_.insert({.db_refcnt = refcnt, .cell = std::move(cell)});
|
||||
}
|
||||
}
|
||||
|
||||
template <class F>
|
||||
static td::unique_ptr<CellStorage> build(DynamicBagOfCellsDb::CreateInMemoryOptions options,
|
||||
F &¶llel_scan_cells) {
|
||||
auto storage = td::make_unique<CellStorage>(PrivateTag{});
|
||||
storage->do_build(options, parallel_scan_cells);
|
||||
return storage;
|
||||
}
|
||||
|
||||
~CellStorage() {
|
||||
clear();
|
||||
}
|
||||
CellStorage() = delete;
|
||||
explicit CellStorage(PrivateTag) {
|
||||
}
|
||||
|
||||
private:
|
||||
template <class F>
|
||||
void do_build(DynamicBagOfCellsDb::CreateInMemoryOptions options, F &¶llel_scan_cells) {
|
||||
auto verbose = options.verbose;
|
||||
td::Slice P = "loading in-memory cell database: ";
|
||||
LOG_IF(WARNING, verbose) << P << "start with options use_arena=" << options.use_arena
|
||||
<< " use_less_memory_during_creation=" << options.use_less_memory_during_creation
|
||||
<< " use_dense_hash_map=" << use_dense_hash_map;
|
||||
auto full_timer = td::Timer();
|
||||
auto lock = local_access_.lock();
|
||||
CHECK(ArenaPrunnedCellCreator::count() == 0);
|
||||
ArenaPrunnedCellCreator arena_pc_creator;
|
||||
DefaultPrunnedCellCreator default_pc_creator;
|
||||
|
||||
auto timer = td::Timer();
|
||||
td::int64 cell_count{0};
|
||||
td::int64 desc_count{0};
|
||||
if (options.use_less_memory_during_creation) {
|
||||
auto [new_cell_count, new_desc_count] = parallel_scan_cells(
|
||||
default_pc_creator, options.use_arena,
|
||||
[&](td::int32 refcnt, Ref<DataCell> cell) { initial_set_without_refs(refcnt, std::move(cell)); });
|
||||
cell_count = new_cell_count;
|
||||
desc_count = new_desc_count;
|
||||
} else {
|
||||
auto [new_cell_count, new_desc_count] =
|
||||
parallel_scan_cells(arena_pc_creator, options.use_arena,
|
||||
[&](td::int32 refcnt, Ref<DataCell> cell) { initial_set(refcnt, std::move(cell)); });
|
||||
cell_count = new_cell_count;
|
||||
desc_count = new_desc_count;
|
||||
}
|
||||
LOG_IF(WARNING, verbose) << P << "cells loaded in " << timer.elapsed() << "s, cells_count= " << cell_count
|
||||
<< " prunned_cells_count=" << ArenaPrunnedCellCreator::count();
|
||||
|
||||
timer = td::Timer();
|
||||
for_each_bucket(options.extra_threads, [&](size_t bucket_id, auto &bucket) { build_hashtable(bucket); });
|
||||
|
||||
size_t ht_capacity = 0;
|
||||
size_t ht_size = 0;
|
||||
for_each_bucket(0, [&](size_t bucket_id, auto &bucket) {
|
||||
ht_size += bucket.infos_.size();
|
||||
ht_capacity += bucket.infos_.bucket_count();
|
||||
});
|
||||
double load_factor = double(ht_size) / std::max(double(ht_capacity), 1.0);
|
||||
LOG_IF(WARNING, verbose) << P << "hashtable created in " << timer.elapsed()
|
||||
<< "s, hashtables_expected_size=" << td::format::as_size(ht_capacity * sizeof(CellInfo))
|
||||
<< " load_factor=" << load_factor;
|
||||
|
||||
timer = td::Timer();
|
||||
if (options.use_less_memory_during_creation) {
|
||||
auto [new_cell_count, new_desc_count] =
|
||||
parallel_scan_cells(default_pc_creator, false,
|
||||
[&](td::int32 refcnt, Ref<DataCell> cell) { secondary_set(refcnt, std::move(cell)); });
|
||||
CHECK(new_cell_count == cell_count);
|
||||
CHECK(new_desc_count == desc_count);
|
||||
} else {
|
||||
for_each_bucket(options.extra_threads, [&](size_t bucket_id, auto &bucket) { reset_refs(bucket); });
|
||||
}
|
||||
LOG_IF(WARNING, verbose) << P << "refs rearranged in " << timer.elapsed() << "s";
|
||||
|
||||
timer = td::Timer();
|
||||
using Stats = DynamicBagOfCellsDb::Stats;
|
||||
std::vector<Stats> bucket_stats(buckets_.size());
|
||||
std::atomic<size_t> boc_count{0};
|
||||
for_each_bucket(options.extra_threads, [&](size_t bucket_id, auto &bucket) {
|
||||
bucket_stats[bucket_id] = validate_bucket_a(bucket, options.use_arena);
|
||||
boc_count += bucket.boc_count_;
|
||||
});
|
||||
for_each_bucket(options.extra_threads, [&](size_t bucket_id, auto &bucket) { validate_bucket_b(bucket); });
|
||||
stats_ = {};
|
||||
for (auto &bucket_stat : bucket_stats) {
|
||||
stats_.apply_diff(bucket_stat);
|
||||
}
|
||||
LOG_IF(WARNING, verbose) << P << "refcnt validated in " << timer.elapsed() << "s";
|
||||
|
||||
timer = td::Timer();
|
||||
build_roots();
|
||||
LOG_IF(WARNING, verbose) << P << "roots hashtable built in " << timer.elapsed() << "s";
|
||||
ArenaPrunnedCellCreator::clear_arena();
|
||||
LOG_IF(WARNING, verbose) << P << "arena cleared in " << timer.elapsed();
|
||||
|
||||
lock.reset();
|
||||
auto r_mem_stat = td::mem_stat();
|
||||
td::MemStat mem_stat;
|
||||
if (r_mem_stat.is_ok()) {
|
||||
mem_stat = r_mem_stat.move_as_ok();
|
||||
}
|
||||
auto stats = get_stats();
|
||||
td::StringBuilder sb;
|
||||
for (auto &[key, value] : stats.custom_stats) {
|
||||
sb << "\n\t" << key << "=" << value;
|
||||
}
|
||||
LOG_IF(ERROR, desc_count != 0 && desc_count != stats.roots_total_count + 1)
|
||||
<< "desc<> keys count is " << desc_count << " wich is different from roots count " << stats.roots_total_count;
|
||||
LOG_IF(WARNING, verbose)
|
||||
<< P << "done in " << full_timer.elapsed() << "\n\troots_count=" << stats.roots_total_count << "\n\t"
|
||||
<< desc_count << "\n\tcells_count=" << stats.cells_total_count
|
||||
<< "\n\tcells_size=" << td::format::as_size(stats.cells_total_size) << "\n\tboc_count=" << boc_count.load()
|
||||
<< sb.as_cslice() << "\n\tdata_cells_size=" << td::format::as_size(sizeof(DataCell) * stats.cells_total_count)
|
||||
<< "\n\tdata_cell_size=" << sizeof(DataCell) << "\n\texpected_memory_used="
|
||||
<< td::format::as_size(stats.cells_total_count * (sizeof(DataCell) + sizeof(CellInfo) * 3 / 2) +
|
||||
stats.cells_total_size)
|
||||
<< "\n\tbest_possible_memory_used"
|
||||
<< td::format::as_size(stats.cells_total_count * (sizeof(DataCell) + sizeof(CellInfo)) + stats.cells_total_size)
|
||||
<< "\n\tmemory_used=" << td::format::as_size(mem_stat.resident_size_)
|
||||
<< "\n\tpeak_memory_used=" << td::format::as_size(mem_stat.resident_size_peak_);
|
||||
|
||||
inited_ = true;
|
||||
}
|
||||
|
||||
template <class F>
|
||||
void for_each_bucket(size_t extra_threads, F &&f) {
|
||||
parallel_run(
|
||||
buckets_.size(), [&](auto task_id) { f(task_id, *get_bucket(task_id).unique_access()); }, extra_threads);
|
||||
}
|
||||
|
||||
void clear() {
|
||||
auto unique_access = local_access_.lock();
|
||||
for_each_bucket(td::thread::hardware_concurrency(), [&](size_t bucket_id, auto &bucket) { bucket.clear(); });
|
||||
local_roots_.clear();
|
||||
{
|
||||
auto lock = std::lock_guard<std::mutex>(root_mutex_);
|
||||
roots_.clear();
|
||||
}
|
||||
}
|
||||
|
||||
void initial_set(td::int32 refcnt, Ref<DataCell> cell) {
|
||||
CHECK(!inited_);
|
||||
auto bucket = get_bucket(cell->get_hash()).unique_access();
|
||||
bucket->cells_.push_back({.db_refcnt = refcnt, .cell = std::move(cell)});
|
||||
}
|
||||
|
||||
void initial_set_without_refs(td::int32 refcnt, Ref<DataCell> cell_ref) {
|
||||
CHECK(!inited_);
|
||||
auto bucket = get_bucket(cell_ref->get_hash()).unique_access();
|
||||
auto &cell = const_cast<DataCell &>(*cell_ref);
|
||||
for (unsigned i = 0; i < cell.size_refs(); i++) {
|
||||
auto to_destroy = cell.reset_ref_unsafe(i, Ref<Cell>(), false);
|
||||
if (to_destroy->is_loaded()) {
|
||||
bucket->boc_count_++;
|
||||
}
|
||||
}
|
||||
bucket->cells_.push_back({.db_refcnt = refcnt, .cell = std::move(cell_ref)});
|
||||
}
|
||||
|
||||
void secondary_set(td::int32 refcnt, Ref<DataCell> cell_copy) {
|
||||
CHECK(!inited_);
|
||||
auto bucket = get_bucket(cell_copy->get_hash()).unique_access();
|
||||
auto info = bucket->infos_.find(cell_copy->get_hash());
|
||||
CHECK(info);
|
||||
CellSlice cs(NoVm{}, std::move(cell_copy));
|
||||
auto &cell = const_cast<DataCell &>(*info->cell);
|
||||
CHECK(cs.size_refs() == cell.size_refs());
|
||||
for (unsigned i = 0; i < cell.size_refs(); i++) {
|
||||
auto prunned_cell_hash = cs.fetch_ref()->get_hash();
|
||||
auto &prunned_cell_bucket = get_bucket(prunned_cell_hash);
|
||||
auto full_cell_ptr = prunned_cell_bucket.infos_.find(prunned_cell_hash);
|
||||
CHECK(full_cell_ptr);
|
||||
auto full_cell = full_cell_ptr->cell;
|
||||
auto to_destroy = cell.reset_ref_unsafe(i, std::move(full_cell), false);
|
||||
CHECK(to_destroy.is_null());
|
||||
}
|
||||
}
|
||||
|
||||
void build_hashtable(CellBucket &bucket) {
|
||||
bucket.infos_.init_from(bucket.cells_.begin(), bucket.cells_.end());
|
||||
LOG_CHECK(bucket.infos_.size() == bucket.cells_.size()) << bucket.infos_.size() << " vs " << bucket.cells_.size();
|
||||
td::reset_to_empty(bucket.cells_);
|
||||
LOG_CHECK(bucket.cells_.capacity() == 0) << bucket.cells_.capacity();
|
||||
}
|
||||
|
||||
void reset_refs(CellBucket &bucket) {
|
||||
bucket.infos_.for_each([&](auto &it) {
|
||||
// This is generally very dangerous, but should be safe here
|
||||
auto &cell = const_cast<DataCell &>(*it.cell);
|
||||
for (unsigned i = 0; i < cell.size_refs(); i++) {
|
||||
auto prunned_cell = cell.get_ref_raw_ptr(i);
|
||||
auto prunned_cell_hash = prunned_cell->get_hash();
|
||||
auto &prunned_cell_bucket = get_bucket(prunned_cell_hash);
|
||||
auto full_cell_ptr = prunned_cell_bucket.infos_.find(prunned_cell_hash);
|
||||
CHECK(full_cell_ptr);
|
||||
auto full_cell = full_cell_ptr->cell;
|
||||
auto to_destroy = cell.reset_ref_unsafe(i, std::move(full_cell));
|
||||
if (!to_destroy->is_loaded()) {
|
||||
Ref<PrunnedCell<ArenaPrunnedCellCreator::Counter>> x(std::move(to_destroy));
|
||||
x->~PrunnedCell<ArenaPrunnedCellCreator::Counter>();
|
||||
x.release();
|
||||
} else {
|
||||
bucket.boc_count_++;
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
DynamicBagOfCellsDb::Stats validate_bucket_a(CellBucket &bucket, bool use_arena) {
|
||||
DynamicBagOfCellsDb::Stats stats;
|
||||
bucket.infos_.for_each([&](auto &it) {
|
||||
int cell_ref_cnt = it.cell->get_refcnt();
|
||||
CHECK(it.db_refcnt + 1 + use_arena >= cell_ref_cnt);
|
||||
auto extra_refcnt = it.db_refcnt + 1 + use_arena - cell_ref_cnt;
|
||||
if (extra_refcnt != 0) {
|
||||
bucket.roots_.push_back(it.cell);
|
||||
stats.roots_total_count++;
|
||||
}
|
||||
stats.cells_total_count++;
|
||||
stats.cells_total_size += static_cast<td::int64>(it.cell->get_storage_size());
|
||||
});
|
||||
return stats;
|
||||
}
|
||||
void validate_bucket_b(CellBucket &bucket) {
|
||||
// sanity check
|
||||
bucket.infos_.for_each([&](auto &it) {
|
||||
CellSlice cs(NoVm{}, it.cell);
|
||||
while (cs.have_refs()) {
|
||||
CHECK(cs.fetch_ref().not_null());
|
||||
}
|
||||
});
|
||||
}
|
||||
void build_roots() {
|
||||
for (auto &it : buckets_) {
|
||||
for (auto &root : it.roots_) {
|
||||
local_roots_.insert(std::move(root));
|
||||
}
|
||||
td::reset_to_empty(it.roots_);
|
||||
}
|
||||
auto lock = std::lock_guard<std::mutex>(root_mutex_);
|
||||
roots_ = local_roots_;
|
||||
}
|
||||
};
|
||||
|
||||
class InMemoryBagOfCellsDb : public DynamicBagOfCellsDb {
|
||||
public:
|
||||
explicit InMemoryBagOfCellsDb(td::unique_ptr<CellStorage> storage) : storage_(std::move(storage)) {
|
||||
}
|
||||
|
||||
td::Result<Ref<DataCell>> load_cell(td::Slice hash) override {
|
||||
return storage_->load_cell(CellHash::from_slice(hash));
|
||||
}
|
||||
|
||||
td::Result<Ref<DataCell>> load_root(td::Slice hash) override {
|
||||
return storage_->load_root_local(CellHash::from_slice(hash));
|
||||
}
|
||||
td::Result<Ref<DataCell>> load_root_thread_safe(td::Slice hash) const override {
|
||||
return storage_->load_root_shared(CellHash::from_slice(hash));
|
||||
}
|
||||
|
||||
void inc(const Ref<Cell> &cell) override {
|
||||
if (cell.is_null()) {
|
||||
return;
|
||||
}
|
||||
if (cell->get_virtualization() != 0) {
|
||||
return;
|
||||
}
|
||||
to_inc_.push_back(cell);
|
||||
}
|
||||
|
||||
void dec(const Ref<Cell> &cell) override {
|
||||
if (cell.is_null()) {
|
||||
return;
|
||||
}
|
||||
if (cell->get_virtualization() != 0) {
|
||||
return;
|
||||
}
|
||||
to_dec_.push_back(cell);
|
||||
}
|
||||
|
||||
td::Status commit(CellStorer &cell_storer) override {
|
||||
if (!to_inc_.empty() || !to_dec_.empty()) {
|
||||
TRY_STATUS(prepare_commit());
|
||||
}
|
||||
|
||||
Stats diff;
|
||||
CHECK(to_dec_.empty());
|
||||
for (auto &it : info_) {
|
||||
auto &info = it.second;
|
||||
if (info.diff_refcnt == 0) {
|
||||
continue;
|
||||
}
|
||||
auto refcnt = td::narrow_cast<td::int32>(static_cast<td::int64>(info.db_refcnt) + info.diff_refcnt);
|
||||
CHECK(refcnt >= 0);
|
||||
if (refcnt > 0) {
|
||||
cell_storer.set(refcnt, info.cell, false);
|
||||
storage_->set(refcnt, info.cell);
|
||||
if (info.db_refcnt == 0) {
|
||||
diff.cells_total_count++;
|
||||
diff.cells_total_size += static_cast<td::int64>(info.cell->get_storage_size());
|
||||
}
|
||||
} else {
|
||||
cell_storer.erase(info.cell->get_hash().as_slice());
|
||||
storage_->erase(info.cell->get_hash());
|
||||
diff.cells_total_count--;
|
||||
diff.cells_total_size -= static_cast<td::int64>(info.cell->get_storage_size());
|
||||
}
|
||||
}
|
||||
storage_->apply_stats_diff(diff);
|
||||
info_ = {};
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
td::Result<Stats> get_stats() override {
|
||||
return storage_->get_stats();
|
||||
}
|
||||
|
||||
// Not implemented or trivial or deprecated methods
|
||||
td::Status set_loader(std::unique_ptr<CellLoader> loader) override {
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
td::Status prepare_commit() override {
|
||||
CHECK(info_.empty());
|
||||
for (auto &to_inc : to_inc_) {
|
||||
auto new_root = do_inc(to_inc);
|
||||
storage_->add_new_root(std::move(new_root));
|
||||
}
|
||||
for (auto &to_dec : to_dec_) {
|
||||
do_dec(to_dec);
|
||||
}
|
||||
to_dec_ = {};
|
||||
to_inc_ = {};
|
||||
return td::Status::OK();
|
||||
}
|
||||
void prepare_commit_async(std::shared_ptr<AsyncExecutor> executor, td::Promise<td::Unit> promise) override {
|
||||
TRY_STATUS_PROMISE(promise, prepare_commit());
|
||||
promise.set_value(td::Unit());
|
||||
}
|
||||
Stats get_stats_diff() override {
|
||||
LOG(FATAL) << "Not implemented";
|
||||
return {};
|
||||
}
|
||||
std::shared_ptr<CellDbReader> get_cell_db_reader() override {
|
||||
return {};
|
||||
}
|
||||
void set_celldb_compress_depth(td::uint32 value) override {
|
||||
LOG(FATAL) << "Not implemented";
|
||||
}
|
||||
ExtCellCreator &as_ext_cell_creator() override {
|
||||
UNREACHABLE();
|
||||
}
|
||||
void load_cell_async(td::Slice hash, std::shared_ptr<AsyncExecutor> executor,
|
||||
td::Promise<Ref<DataCell>> promise) override {
|
||||
LOG(FATAL) << "Not implemented";
|
||||
}
|
||||
|
||||
private:
|
||||
td::unique_ptr<CellStorage> storage_;
|
||||
|
||||
struct Info {
|
||||
td::int32 db_refcnt{0};
|
||||
td::int32 diff_refcnt{0};
|
||||
Ref<DataCell> cell;
|
||||
};
|
||||
td::HashMap<CellHash, Info> info_;
|
||||
|
||||
std::unique_ptr<CellLoader> loader_;
|
||||
std::vector<Ref<Cell>> to_inc_;
|
||||
std::vector<Ref<Cell>> to_dec_;
|
||||
|
||||
Ref<DataCell> do_inc(Ref<Cell> cell) {
|
||||
auto cell_hash = cell->get_hash();
|
||||
if (auto it = info_.find(cell_hash); it != info_.end()) {
|
||||
CHECK(it->second.diff_refcnt != std::numeric_limits<td::int32>::max());
|
||||
it->second.diff_refcnt++;
|
||||
return it->second.cell;
|
||||
}
|
||||
if (auto o_info = storage_->get_info(cell_hash)) {
|
||||
info_.emplace(cell_hash, Info{.db_refcnt = o_info->db_refcnt, .diff_refcnt = 1, .cell = o_info->cell});
|
||||
return std::move(o_info->cell);
|
||||
}
|
||||
|
||||
CellSlice cs(NoVm{}, std::move(cell));
|
||||
CellBuilder cb;
|
||||
cb.store_bits(cs.data(), cs.size());
|
||||
while (cs.have_refs()) {
|
||||
auto ref = do_inc(cs.fetch_ref());
|
||||
cb.store_ref(std::move(ref));
|
||||
}
|
||||
auto res = cb.finalize(cs.is_special());
|
||||
CHECK(res->get_hash() == cell_hash);
|
||||
info_.emplace(cell_hash, Info{.db_refcnt = 0, .diff_refcnt = 1, .cell = res});
|
||||
return res;
|
||||
}
|
||||
|
||||
void do_dec(Ref<Cell> cell) {
|
||||
auto cell_hash = cell->get_hash();
|
||||
auto it = info_.find(cell_hash);
|
||||
if (it != info_.end()) {
|
||||
CHECK(it->second.diff_refcnt != std::numeric_limits<td::int32>::min());
|
||||
--it->second.diff_refcnt;
|
||||
} else {
|
||||
auto info = *storage_->get_info(cell_hash);
|
||||
it = info_.emplace(cell_hash, Info{.db_refcnt = info.db_refcnt, .diff_refcnt = -1, .cell = info.cell}).first;
|
||||
}
|
||||
if (it->second.diff_refcnt + it->second.db_refcnt != 0) {
|
||||
return;
|
||||
}
|
||||
CellSlice cs(NoVm{}, std::move(cell));
|
||||
while (cs.have_refs()) {
|
||||
do_dec(cs.fetch_ref());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
std::unique_ptr<DynamicBagOfCellsDb> DynamicBagOfCellsDb::create_in_memory(td::KeyValueReader *kv,
|
||||
CreateInMemoryOptions options) {
|
||||
if (kv == nullptr) {
|
||||
LOG_IF(WARNING, options.verbose) << "Create empty in-memory cells database (no key value is given)";
|
||||
auto storage = CellStorage::build(options, [](auto, auto, auto) { return std::make_pair(0, 0); });
|
||||
return std::make_unique<InMemoryBagOfCellsDb>(std::move(storage));
|
||||
}
|
||||
|
||||
std::vector<std::string> keys;
|
||||
keys.emplace_back("");
|
||||
for (td::uint32 c = 1; c <= 0xff; c++) {
|
||||
keys.emplace_back(1, static_cast<char>(c));
|
||||
}
|
||||
keys.emplace_back(33, static_cast<char>(0xff));
|
||||
|
||||
auto parallel_scan_cells = [&](ExtCellCreator &pc_creator, bool use_arena,
|
||||
auto &&f) -> std::pair<td::int64, td::int64> {
|
||||
std::atomic<td::int64> cell_count{0};
|
||||
std::atomic<td::int64> desc_count{0};
|
||||
parallel_run(
|
||||
keys.size() - 1,
|
||||
[&](auto task_id) {
|
||||
td::int64 local_cell_count = 0;
|
||||
td::int64 local_desc_count = 0;
|
||||
CHECK(!DataCell::use_arena);
|
||||
DataCell::use_arena = use_arena;
|
||||
kv->for_each_in_range(keys.at(task_id), keys.at(task_id + 1), [&](td::Slice key, td::Slice value) {
|
||||
if (td::begins_with(key, "desc") && key.size() != 32) {
|
||||
local_desc_count++;
|
||||
return td::Status::OK();
|
||||
}
|
||||
auto r_res = CellLoader::load(key, value.str(), true, pc_creator);
|
||||
if (r_res.is_error()) {
|
||||
LOG(ERROR) << r_res.error() << " at " << td::format::escaped(key);
|
||||
return td::Status::OK();
|
||||
}
|
||||
CHECK(key.size() == 32);
|
||||
CHECK(key.ubegin()[0] == task_id);
|
||||
auto res = r_res.move_as_ok();
|
||||
f(res.refcnt(), res.cell());
|
||||
local_cell_count++;
|
||||
return td::Status::OK();
|
||||
}).ensure();
|
||||
DataCell::use_arena = false;
|
||||
cell_count += local_cell_count;
|
||||
desc_count += local_desc_count;
|
||||
},
|
||||
options.extra_threads);
|
||||
return std::make_pair(cell_count.load(), desc_count.load());
|
||||
};
|
||||
|
||||
auto storage = CellStorage::build(options, parallel_scan_cells);
|
||||
return std::make_unique<InMemoryBagOfCellsDb>(std::move(storage));
|
||||
}
|
||||
} // namespace vm
|
|
@ -167,7 +167,7 @@ td::Result<Ref<Cell>> StaticBagOfCellsDb::create_ext_cell(Cell::LevelMask level_
|
|||
//
|
||||
class StaticBagOfCellsDbBaselineImpl : public StaticBagOfCellsDb {
|
||||
public:
|
||||
StaticBagOfCellsDbBaselineImpl(std::vector<Ref<Cell>> roots) : roots_(std::move(roots)) {
|
||||
explicit StaticBagOfCellsDbBaselineImpl(std::vector<Ref<Cell>> roots) : roots_(std::move(roots)) {
|
||||
}
|
||||
td::Result<size_t> get_root_count() override {
|
||||
return roots_.size();
|
||||
|
@ -233,7 +233,7 @@ class StaticBagOfCellsDbLazyImpl : public StaticBagOfCellsDb {
|
|||
return create_root_cell(std::move(data_cell));
|
||||
};
|
||||
|
||||
~StaticBagOfCellsDbLazyImpl() {
|
||||
~StaticBagOfCellsDbLazyImpl() override {
|
||||
//LOG(ERROR) << deserialize_cell_cnt_ << " " << deserialize_cell_hash_cnt_;
|
||||
get_thread_safe_counter().add(-1);
|
||||
}
|
||||
|
@ -314,11 +314,11 @@ class StaticBagOfCellsDbLazyImpl : public StaticBagOfCellsDb {
|
|||
td::RwMutex::ReadLock guard;
|
||||
if (info_.has_index) {
|
||||
TRY_RESULT(new_offset_view, data_.view(td::MutableSlice(arr, info_.offset_byte_size),
|
||||
info_.index_offset + idx * info_.offset_byte_size));
|
||||
info_.index_offset + (td::int64)idx * info_.offset_byte_size));
|
||||
offset_view = new_offset_view;
|
||||
} else {
|
||||
guard = index_data_rw_mutex_.lock_read().move_as_ok();
|
||||
offset_view = td::Slice(index_data_).substr(idx * info_.offset_byte_size, info_.offset_byte_size);
|
||||
offset_view = td::Slice(index_data_).substr((td::int64)idx * info_.offset_byte_size, info_.offset_byte_size);
|
||||
}
|
||||
|
||||
CHECK(offset_view.size() == (size_t)info_.offset_byte_size);
|
||||
|
@ -332,7 +332,7 @@ class StaticBagOfCellsDbLazyImpl : public StaticBagOfCellsDb {
|
|||
}
|
||||
char arr[8];
|
||||
TRY_RESULT(idx_view, data_.view(td::MutableSlice(arr, info_.ref_byte_size),
|
||||
info_.roots_offset + root_i * info_.ref_byte_size));
|
||||
info_.roots_offset + (td::int64)root_i * info_.ref_byte_size));
|
||||
CHECK(idx_view.size() == (size_t)info_.ref_byte_size);
|
||||
return info_.read_ref(idx_view.ubegin());
|
||||
}
|
||||
|
|
|
@ -113,7 +113,8 @@ class TonDbTransactionImpl;
|
|||
using TonDbTransaction = std::unique_ptr<TonDbTransactionImpl>;
|
||||
class TonDbTransactionImpl {
|
||||
public:
|
||||
SmartContractDb begin_smartcontract(td::Slice hash = {});
|
||||
|
||||
SmartContractDb begin_smartcontract(td::Slice hash = std::string(32, '\0'));
|
||||
|
||||
void commit_smartcontract(SmartContractDb txn);
|
||||
void commit_smartcontract(SmartContractDiff txn);
|
||||
|
@ -142,6 +143,20 @@ class TonDbTransactionImpl {
|
|||
friend bool operator<(td::Slice hash, const SmartContractInfo &info) {
|
||||
return hash < info.hash;
|
||||
}
|
||||
|
||||
struct Eq {
|
||||
using is_transparent = void; // Pred to use
|
||||
bool operator()(const SmartContractInfo &info, const SmartContractInfo &other_info) const { return info.hash == other_info.hash;}
|
||||
bool operator()(const SmartContractInfo &info, td::Slice hash) const { return info.hash == hash;}
|
||||
bool operator()(td::Slice hash, const SmartContractInfo &info) const { return info.hash == hash;}
|
||||
|
||||
};
|
||||
struct Hash {
|
||||
using is_transparent = void; // Pred to use
|
||||
using transparent_key_equal = Eq;
|
||||
size_t operator()(td::Slice hash) const { return cell_hash_slice_hash(hash); }
|
||||
size_t operator()(const SmartContractInfo &info) const { return cell_hash_slice_hash(info.hash);}
|
||||
};
|
||||
};
|
||||
|
||||
CellHashTable<SmartContractInfo> contracts_;
|
||||
|
|
|
@ -1779,7 +1779,7 @@ Ref<Cell> DictionaryFixed::dict_combine_with(Ref<Cell> dict1, Ref<Cell> dict2, t
|
|||
int mode, int skip1, int skip2) const {
|
||||
if (dict1.is_null()) {
|
||||
assert(!skip2);
|
||||
if ((mode & 1) && dict2.is_null()) {
|
||||
if ((mode & 1) && dict2.not_null()) {
|
||||
throw CombineError{};
|
||||
}
|
||||
return dict2;
|
||||
|
@ -1854,11 +1854,11 @@ Ref<Cell> DictionaryFixed::dict_combine_with(Ref<Cell> dict1, Ref<Cell> dict2, t
|
|||
key_buffer[-1] = 0;
|
||||
// combine left subtrees
|
||||
auto c1 = dict_combine_with(label1.remainder->prefetch_ref(0), label2.remainder->prefetch_ref(0), key_buffer,
|
||||
n - c - 1, total_key_len, combine_func);
|
||||
n - c - 1, total_key_len, combine_func, mode);
|
||||
key_buffer[-1] = 1;
|
||||
// combine right subtrees
|
||||
auto c2 = dict_combine_with(label1.remainder->prefetch_ref(1), label2.remainder->prefetch_ref(1), key_buffer,
|
||||
n - c - 1, total_key_len, combine_func);
|
||||
n - c - 1, total_key_len, combine_func, mode);
|
||||
label1.remainder.clear();
|
||||
label2.remainder.clear();
|
||||
// c1 and c2 are merged left and right children of dict1 and dict2
|
||||
|
|
|
@ -33,10 +33,12 @@ class LargeBocSerializer {
|
|||
public:
|
||||
using Hash = Cell::Hash;
|
||||
|
||||
explicit LargeBocSerializer(std::shared_ptr<CellDbReader> reader, td::CancellationToken cancellation_token = {})
|
||||
: reader(std::move(reader)), cancellation_token(std::move(cancellation_token)) {
|
||||
explicit LargeBocSerializer(std::shared_ptr<CellDbReader> reader) : reader(std::move(reader)) {
|
||||
}
|
||||
|
||||
void set_logger(BagOfCellsLogger* logger_ptr) {
|
||||
logger_ptr_ = logger_ptr;
|
||||
}
|
||||
void add_root(Hash root);
|
||||
td::Status import_cells();
|
||||
td::Status serialize(td::FileFd& fd, int mode);
|
||||
|
@ -44,6 +46,7 @@ class LargeBocSerializer {
|
|||
private:
|
||||
std::shared_ptr<CellDbReader> reader;
|
||||
struct CellInfo {
|
||||
Cell::Hash hash;
|
||||
std::array<int, 4> ref_idx;
|
||||
int idx;
|
||||
unsigned short serialized_size;
|
||||
|
@ -67,7 +70,7 @@ class LargeBocSerializer {
|
|||
return 4;
|
||||
}
|
||||
};
|
||||
std::map<Hash, CellInfo> cells;
|
||||
td::NodeHashMap<Hash, CellInfo> cells;
|
||||
std::vector<std::pair<const Hash, CellInfo>*> cell_list;
|
||||
struct RootInfo {
|
||||
RootInfo(Hash hash, int idx) : hash(hash), idx(idx) {
|
||||
|
@ -85,10 +88,7 @@ class LargeBocSerializer {
|
|||
int revisit(int cell_idx, int force = 0);
|
||||
td::uint64 compute_sizes(int mode, int& r_size, int& o_size);
|
||||
|
||||
td::CancellationToken cancellation_token;
|
||||
td::Timestamp log_speed_at_;
|
||||
size_t processed_cells_ = 0;
|
||||
static constexpr double LOG_SPEED_PERIOD = 120.0;
|
||||
BagOfCellsLogger* logger_ptr_{};
|
||||
};
|
||||
|
||||
void LargeBocSerializer::add_root(Hash root) {
|
||||
|
@ -96,16 +96,18 @@ void LargeBocSerializer::add_root(Hash root) {
|
|||
}
|
||||
|
||||
td::Status LargeBocSerializer::import_cells() {
|
||||
td::Timer timer;
|
||||
log_speed_at_ = td::Timestamp::in(LOG_SPEED_PERIOD);
|
||||
processed_cells_ = 0;
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->start_stage("import_cells");
|
||||
}
|
||||
for (auto& root : roots) {
|
||||
TRY_RESULT(idx, import_cell(root.hash));
|
||||
root.idx = idx;
|
||||
}
|
||||
reorder_cells();
|
||||
CHECK(!cell_list.empty());
|
||||
LOG(ERROR) << "serializer: import_cells took " << timer.elapsed() << "s, " << cell_count << " cells";
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->finish_stage(PSLICE() << cell_count << " cells");
|
||||
}
|
||||
return td::Status::OK();
|
||||
}
|
||||
|
||||
|
@ -113,14 +115,8 @@ td::Result<int> LargeBocSerializer::import_cell(Hash hash, int depth) {
|
|||
if (depth > Cell::max_depth) {
|
||||
return td::Status::Error("error while importing a cell into a bag of cells: cell depth too large");
|
||||
}
|
||||
++processed_cells_;
|
||||
if (processed_cells_ % 1000 == 0) {
|
||||
TRY_STATUS(cancellation_token.check());
|
||||
}
|
||||
if (log_speed_at_.is_in_past()) {
|
||||
log_speed_at_ += LOG_SPEED_PERIOD;
|
||||
LOG(WARNING) << "serializer: import_cells " << (double)processed_cells_ / LOG_SPEED_PERIOD << " cells/s";
|
||||
processed_cells_ = 0;
|
||||
if (logger_ptr_) {
|
||||
TRY_STATUS(logger_ptr_->on_cell_processed());
|
||||
}
|
||||
auto it = cells.find(hash);
|
||||
if (it != cells.end()) {
|
||||
|
@ -306,7 +302,6 @@ td::uint64 LargeBocSerializer::compute_sizes(int mode, int& r_size, int& o_size)
|
|||
}
|
||||
|
||||
td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
|
||||
td::Timer timer;
|
||||
using Mode = BagOfCells::Mode;
|
||||
BagOfCells::Info info;
|
||||
if ((mode & Mode::WithCacheBits) && !(mode & Mode::WithIndex)) {
|
||||
|
@ -370,6 +365,9 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
|
|||
DCHECK(writer.position() == info.index_offset);
|
||||
DCHECK((unsigned)cell_count == cell_list.size());
|
||||
if (info.has_index) {
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->start_stage("generate_index");
|
||||
}
|
||||
std::size_t offs = 0;
|
||||
for (int i = cell_count - 1; i >= 0; --i) {
|
||||
const auto& dc_info = cell_list[i]->second;
|
||||
|
@ -387,13 +385,20 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
|
|||
fixed_offset = offs * 2 + dc_info.should_cache;
|
||||
}
|
||||
store_offset(fixed_offset);
|
||||
if (logger_ptr_) {
|
||||
TRY_STATUS(logger_ptr_->on_cell_processed());
|
||||
}
|
||||
}
|
||||
DCHECK(offs == info.data_size);
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->finish_stage("");
|
||||
}
|
||||
}
|
||||
DCHECK(writer.position() == info.data_offset);
|
||||
size_t keep_position = writer.position();
|
||||
log_speed_at_ = td::Timestamp::in(LOG_SPEED_PERIOD);
|
||||
processed_cells_ = 0;
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->start_stage("serialize");
|
||||
}
|
||||
for (int i = 0; i < cell_count; ++i) {
|
||||
auto hash = cell_list[cell_count - 1 - i]->first;
|
||||
const auto& dc_info = cell_list[cell_count - 1 - i]->second;
|
||||
|
@ -412,14 +417,8 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
|
|||
DCHECK(k > i && k < cell_count);
|
||||
store_ref(k);
|
||||
}
|
||||
++processed_cells_;
|
||||
if (processed_cells_ % 1000 == 0) {
|
||||
TRY_STATUS(cancellation_token.check());
|
||||
}
|
||||
if (log_speed_at_.is_in_past()) {
|
||||
log_speed_at_ += LOG_SPEED_PERIOD;
|
||||
LOG(WARNING) << "serializer: serialize " << (double)processed_cells_ / LOG_SPEED_PERIOD << " cells/s";
|
||||
processed_cells_ = 0;
|
||||
if (logger_ptr_) {
|
||||
TRY_STATUS(logger_ptr_->on_cell_processed());
|
||||
}
|
||||
}
|
||||
DCHECK(writer.position() - keep_position == info.data_size);
|
||||
|
@ -429,8 +428,9 @@ td::Status LargeBocSerializer::serialize(td::FileFd& fd, int mode) {
|
|||
}
|
||||
DCHECK(writer.empty());
|
||||
TRY_STATUS(writer.finalize());
|
||||
LOG(ERROR) << "serializer: serialize took " << timer.elapsed() << "s, " << cell_count << " cells, "
|
||||
<< writer.position() << " bytes";
|
||||
if (logger_ptr_) {
|
||||
logger_ptr_->finish_stage(PSLICE() << cell_count << " cells, " << writer.position() << " bytes");
|
||||
}
|
||||
return td::Status::OK();
|
||||
}
|
||||
} // namespace
|
||||
|
@ -439,7 +439,9 @@ td::Status std_boc_serialize_to_file_large(std::shared_ptr<CellDbReader> reader,
|
|||
int mode, td::CancellationToken cancellation_token) {
|
||||
td::Timer timer;
|
||||
CHECK(reader != nullptr)
|
||||
LargeBocSerializer serializer(reader, std::move(cancellation_token));
|
||||
LargeBocSerializer serializer(reader);
|
||||
BagOfCellsLogger logger(std::move(cancellation_token));
|
||||
serializer.set_logger(&logger);
|
||||
serializer.add_root(root_hash);
|
||||
TRY_STATUS(serializer.import_cells());
|
||||
TRY_STATUS(serializer.serialize(fd, mode));
|
||||
|
|
|
@@ -118,6 +118,7 @@ class VmState final : public VmStateInterface {
    stack_entry_gas_price = 1,
    runvm_gas_price = 40,
    hash_ext_entry_gas_price = 1,
    free_nested_cont_jump = 8,

    rist255_mul_gas_price = 2000,
    rist255_mulbase_gas_price = 750,
@@ -366,11 +367,19 @@ class VmState final : public VmStateInterface {
    return cond ? c1_envelope(std::move(cont), save) : std::move(cont);
  }
  void c1_save_set(bool save = true);
  void fatal(void) const {
  void fatal() const {
    throw VmFatal{};
  }
  int jump_to(Ref<Continuation> cont) {
    return cont->is_unique() ? cont.unique_write().jump_w(this) : cont->jump(this);
    int res = 0, cnt = 0;
    while (cont.not_null()) {
      cont = cont->is_unique() ? cont.unique_write().jump_w(this, res) : cont->jump(this, res);
      cnt++;
      if (cnt > free_nested_cont_jump && global_version >= 9) {
        consume_gas(1);
      }
    }
    return res;
  }
  static Ref<CellSlice> convert_code_cell(Ref<Cell> code_cell);
  bool try_commit();
@@ -110,3 +110,9 @@ Operations for working with Merkle proofs, where cells can have non-zero level a
- Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent.
- Allow unfreeze through external messages.
- Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages.

## Version 9

- Fix `RAWRESERVE` action with flag `4` (use original balance of the account) by explicitly setting `original_balance` to `balance - msg_balance_remaining`.
  - Previously it did not work if storage fee was greater than the original balance.
- Jumps to nested continuations of depth more than 8 consume 1 gas for each subsequent continuation (this does not affect most of TVM code).
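For illustration only (not part of the changelog): the Version 9 rule charges one gas per nested continuation jump beyond the free allowance of 8, so a chain of 12 nested jumps pays 4 extra gas. A hedged sketch of the accounting, mirroring `free_nested_cont_jump` in the VM:

// Hypothetical helper; the VM itself counts jumps inside VmState::jump_to().
int extra_gas_for_jump_chain(int chain_length, int global_version) {
  const int free_nested_cont_jump = 8;
  if (global_version < 9 || chain_length <= free_nested_cont_jump) {
    return 0;
  }
  return chain_length - free_nested_cont_jump;
}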
@@ -27,7 +27,7 @@ set(EMULATOR_EMSCRIPTEN_SOURCE
include(GenerateExportHeader)

add_library(emulator_static STATIC ${EMULATOR_STATIC_SOURCE})
target_link_libraries(emulator_static PUBLIC ton_crypto ton_block smc-envelope)
target_link_libraries(emulator_static PUBLIC ton_crypto smc-envelope)

if (NOT USE_EMSCRIPTEN AND BUILD_SHARED_LIBS)
  add_library(emulator SHARED ${EMULATOR_SOURCE} ${EMULATOR_HEADERS})
@@ -65,4 +65,4 @@ if (USE_EMSCRIPTEN)
  target_compile_options(emulator-emscripten PRIVATE -fexceptions)
endif()

install(TARGETS emulator LIBRARY DESTINATION lib)
install(TARGETS emulator ARCHIVE DESTINATION lib LIBRARY DESTINATION lib)
@ -260,6 +260,10 @@ class PublicKey {
|
|||
td::BufferSlice export_as_slice() const;
|
||||
static td::Result<PublicKey> import(td::Slice s);
|
||||
|
||||
bool is_ed25519() const {
|
||||
return pub_key_.get_offset() == pub_key_.offset<pubkeys::Ed25519>();
|
||||
}
|
||||
|
||||
pubkeys::Ed25519 ed25519_value() const {
|
||||
CHECK(pub_key_.get_offset() == pub_key_.offset<pubkeys::Ed25519>());
|
||||
return pub_key_.get<pubkeys::Ed25519>();
|
||||
|
|
|
@ -1,10 +1,9 @@
|
|||
cmake_minimum_required(VERSION 3.5 FATAL_ERROR)
|
||||
|
||||
add_library(lite-client-common STATIC lite-client-common.cpp lite-client-common.h)
|
||||
target_link_libraries(lite-client-common PUBLIC tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_crypto ton_block)
|
||||
target_link_libraries(lite-client-common PUBLIC tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_crypto)
|
||||
|
||||
add_executable(lite-client lite-client.cpp lite-client.h)
|
||||
target_link_libraries(lite-client tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_crypto ton_block
|
||||
terminal lite-client-common git)
|
||||
target_link_libraries(lite-client tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils terminal lite-client-common git)
|
||||
|
||||
install(TARGETS lite-client RUNTIME DESTINATION bin)
|
||||
|
|
|
@ -965,8 +965,8 @@ bool TestNode::show_help(std::string command) {
|
|||
"recentcreatorstats <block-id-ext> <start-utime> [<count> [<start-pubkey>]]\tLists block creator statistics "
|
||||
"updated after <start-utime> by validator public "
|
||||
"key\n"
|
||||
"checkload[all|severe] <start-utime> <end-utime> [<savefile-prefix>]\tChecks whether all validators worked "
|
||||
"properly during specified time "
|
||||
"checkload[all|severe][-v2] <start-utime> <end-utime> [<savefile-prefix>]\tChecks whether all validators "
|
||||
"worked properly during specified time "
|
||||
"interval, and optionally saves proofs into <savefile-prefix>-<n>.boc\n"
|
||||
"loadproofcheck <filename>\tChecks a validator misbehavior proof previously created by checkload\n"
|
||||
"pastvalsets\tLists known past validator set ids and their hashes\n"
|
||||
|
@ -974,6 +974,11 @@ bool TestNode::show_help(std::string command) {
|
|||
"into files <filename-pfx><complaint-hash>.boc\n"
|
||||
"complaintprice <expires-in> <complaint-boc>\tComputes the price (in nanograms) for creating a complaint\n"
|
||||
"msgqueuesizes\tShows current sizes of outbound message queues in all shards\n"
|
||||
"dispatchqueueinfo <block-id>\tShows list of account dispatch queue of a block\n"
|
||||
"dispatchqueuemessages <block-id> <addr> [<after-lt>]\tShows deferred messages from account <addr>, lt > "
|
||||
"<after_lt>\n"
|
||||
"dispatchqueuemessagesall <block-id> [<after-addr> [<after-lt>]]\tShows messages from dispatch queue of a "
|
||||
"block, starting after <after_addr>, <after-lt>\n"
|
||||
"known\tShows the list of all known block ids\n"
|
||||
"knowncells\tShows the list of hashes of all known (cached) cells\n"
|
||||
"dumpcell <hex-hash-pfx>\nDumps a cached cell by a prefix of its hash\n"
|
||||
|
@ -988,9 +993,9 @@ bool TestNode::show_help(std::string command) {
|
|||
bool TestNode::do_parse_line() {
|
||||
ton::WorkchainId workchain = ton::masterchainId; // change to basechain later
|
||||
int addr_ext = 0;
|
||||
ton::StdSmcAddress addr{};
|
||||
ton::StdSmcAddress addr = ton::StdSmcAddress::zero();
|
||||
ton::BlockIdExt blkid{};
|
||||
ton::LogicalTime lt{};
|
||||
ton::LogicalTime lt = 0;
|
||||
ton::Bits256 hash{};
|
||||
ton::ShardIdFull shard{};
|
||||
ton::BlockSeqno seqno{};
|
||||
|
@ -1097,8 +1102,15 @@ bool TestNode::do_parse_line() {
|
|||
return parse_block_id_ext(blkid) && (!mode || parse_uint32(utime)) &&
|
||||
(seekeoln() ? (mode |= 0x100) : parse_uint32(count)) && (seekeoln() || (parse_hash(hash) && (mode |= 1))) &&
|
||||
seekeoln() && get_creator_stats(blkid, mode, count, hash, utime);
|
||||
} else if (word == "checkload" || word == "checkloadall" || word == "checkloadsevere") {
|
||||
int time1, time2, mode = (word == "checkloadsevere");
|
||||
} else if (word == "checkload" || word == "checkloadall" || word == "checkloadsevere" || word == "checkload-v2" ||
|
||||
word == "checkloadall-v2" || word == "checkloadsevere-v2") {
|
||||
int time1, time2, mode = 0;
|
||||
if (word == "checkloadsevere" || word == "checkloadsevere-v2") {
|
||||
mode |= 1;
|
||||
}
|
||||
if (td::ends_with(word, "-v2")) {
|
||||
mode |= 4;
|
||||
}
|
||||
std::string file_pfx;
|
||||
return parse_int32(time1) && parse_int32(time2) && (seekeoln() || ((mode |= 2) && get_word_to(file_pfx))) &&
|
||||
seekeoln() && check_validator_load(time1, time2, mode, file_pfx);
|
||||
|
@ -1118,6 +1130,16 @@ bool TestNode::do_parse_line() {
|
|||
set_error(get_complaint_price(expire_in, filename));
|
||||
} else if (word == "msgqueuesizes") {
|
||||
return get_msg_queue_sizes();
|
||||
} else if (word == "dispatchqueueinfo") {
|
||||
return parse_block_id_ext(blkid) && seekeoln() && get_dispatch_queue_info(blkid);
|
||||
} else if (word == "dispatchqueuemessages" || word == "dispatchqueuemessagesall") {
|
||||
bool one_account = word == "dispatchqueuemessages";
|
||||
if (!parse_block_id_ext(blkid)) {
|
||||
return false;
|
||||
}
|
||||
workchain = blkid.id.workchain;
|
||||
return ((!one_account && seekeoln()) || parse_account_addr(workchain, addr)) && (seekeoln() || parse_lt(lt)) &&
|
||||
seekeoln() && get_dispatch_queue_messages(blkid, workchain, addr, lt, one_account);
|
||||
} else if (word == "known") {
|
||||
return eoln() && show_new_blkids(true);
|
||||
} else if (word == "knowncells") {
|
||||
|
@ -1645,6 +1667,81 @@ void TestNode::got_msg_queue_sizes(ton::tl_object_ptr<ton::lite_api::liteServer_
|
|||
td::TerminalIO::out() << "External message queue size limit: " << f->ext_msg_queue_size_limit_ << std::endl;
|
||||
}
|
||||
|
||||
bool TestNode::get_dispatch_queue_info(ton::BlockIdExt block_id) {
|
||||
td::TerminalIO::out() << "Dispatch queue in block: " << block_id.id.to_str() << std::endl;
|
||||
return get_dispatch_queue_info_cont(block_id, true, td::Bits256::zero());
|
||||
}
|
||||
|
||||
bool TestNode::get_dispatch_queue_info_cont(ton::BlockIdExt block_id, bool first, td::Bits256 after_addr) {
|
||||
auto q = ton::create_serialize_tl_object<ton::lite_api::liteServer_getDispatchQueueInfo>(
|
||||
first ? 0 : 2, ton::create_tl_lite_block_id(block_id), after_addr, 32, false);
|
||||
return envelope_send_query(std::move(q), [=, Self = actor_id(this)](td::Result<td::BufferSlice> res) -> void {
|
||||
if (res.is_error()) {
|
||||
LOG(ERROR) << "liteServer.getDispatchQueueInfo error: " << res.move_as_error();
|
||||
return;
|
||||
}
|
||||
auto F = ton::fetch_tl_object<ton::lite_api::liteServer_dispatchQueueInfo>(res.move_as_ok(), true);
|
||||
if (F.is_error()) {
|
||||
LOG(ERROR) << "cannot parse answer to liteServer.getDispatchQueueInfo";
|
||||
return;
|
||||
}
|
||||
td::actor::send_closure_later(Self, &TestNode::got_dispatch_queue_info, block_id, F.move_as_ok());
|
||||
});
|
||||
}
|
||||
|
||||
void TestNode::got_dispatch_queue_info(ton::BlockIdExt block_id,
|
||||
ton::tl_object_ptr<ton::lite_api::liteServer_dispatchQueueInfo> info) {
|
||||
for (auto& acc : info->account_dispatch_queues_) {
|
||||
td::TerminalIO::out() << block_id.id.workchain << ":" << acc->addr_.to_hex() << " : size=" << acc->size_
|
||||
<< " lt=" << acc->min_lt_ << ".." << acc->max_lt_ << std::endl;
|
||||
}
|
||||
if (info->complete_) {
|
||||
td::TerminalIO::out() << "Done" << std::endl;
|
||||
return;
|
||||
}
|
||||
get_dispatch_queue_info_cont(block_id, false, info->account_dispatch_queues_.back()->addr_);
|
||||
}
|
||||
|
||||
bool TestNode::get_dispatch_queue_messages(ton::BlockIdExt block_id, ton::WorkchainId wc, ton::StdSmcAddress addr,
|
||||
ton::LogicalTime lt, bool one_account) {
|
||||
if (wc != block_id.id.workchain) {
|
||||
return set_error("workchain mismatch");
|
||||
}
|
||||
auto q = ton::create_serialize_tl_object<ton::lite_api::liteServer_getDispatchQueueMessages>(
|
||||
one_account ? 2 : 0, ton::create_tl_lite_block_id(block_id), addr, lt, 64, false, one_account, false);
|
||||
return envelope_send_query(std::move(q), [=, Self = actor_id(this)](td::Result<td::BufferSlice> res) -> void {
|
||||
if (res.is_error()) {
|
||||
LOG(ERROR) << "liteServer.getDispatchQueueMessages error: " << res.move_as_error();
|
||||
return;
|
||||
}
|
||||
auto F = ton::fetch_tl_object<ton::lite_api::liteServer_dispatchQueueMessages>(res.move_as_ok(), true);
|
||||
if (F.is_error()) {
|
||||
LOG(ERROR) << "cannot parse answer to liteServer.getDispatchQueueMessages";
|
||||
return;
|
||||
}
|
||||
td::actor::send_closure_later(Self, &TestNode::got_dispatch_queue_messages, F.move_as_ok());
|
||||
});
|
||||
}
|
||||
|
||||
void TestNode::got_dispatch_queue_messages(ton::tl_object_ptr<ton::lite_api::liteServer_dispatchQueueMessages> msgs) {
|
||||
td::TerminalIO::out() << "Dispatch queue messages (" << msgs->messages_.size() << "):\n";
|
||||
int count = 0;
|
||||
for (auto& m : msgs->messages_) {
|
||||
auto& meta = m->metadata_;
|
||||
td::TerminalIO::out() << "Msg #" << ++count << ": " << msgs->id_->workchain_ << ":" << m->addr_.to_hex() << " "
|
||||
<< m->lt_ << " : "
|
||||
<< (meta->initiator_->workchain_ == ton::workchainInvalid
|
||||
? "[ no metadata ]"
|
||||
: block::MsgMetadata{(td::uint32)meta->depth_, meta->initiator_->workchain_,
|
||||
meta->initiator_->id_, (ton::LogicalTime)meta->initiator_lt_}
|
||||
.to_str())
|
||||
<< "\n";
|
||||
}
|
||||
if (!msgs->complete_) {
|
||||
td::TerminalIO::out() << "(incomplete list)\n";
|
||||
}
|
||||
}
|
||||
|
||||
bool TestNode::dns_resolve_start(ton::WorkchainId workchain, ton::StdSmcAddress addr, ton::BlockIdExt blkid,
|
||||
std::string domain, td::Bits256 cat, int mode) {
|
||||
if (domain.size() >= 2 && domain[0] == '"' && domain.back() == '"') {
|
||||
|
@ -3616,7 +3713,7 @@ void TestNode::continue_check_validator_load2(std::unique_ptr<TestNode::Validato
|
|||
load_creator_stats(std::move(info2), std::move(P.second), true);
|
||||
}
|
||||
|
||||
// computes the probability of creating <= x masterchain blocks if the expected value is y
|
||||
// computes the probability of creating <= x blocks if the expected value is y
|
||||
static double create_prob(int x, double y) {
|
||||
if (x < 0 || y < 0) {
|
||||
return .5;
|
||||
|
@ -3657,49 +3754,79 @@ void TestNode::continue_check_validator_load3(std::unique_ptr<TestNode::Validato
|
|||
std::unique_ptr<TestNode::ValidatorLoadInfo> info2, int mode,
|
||||
std::string file_pfx) {
|
||||
LOG(INFO) << "continue_check_validator_load3 for blocks " << info1->blk_id.to_str() << " and "
|
||||
<< info2->blk_id.to_str() << " with mode=" << mode << " and file prefix `" << file_pfx
|
||||
<< "`: comparing block creators data";
|
||||
<< info2->blk_id.to_str() << " with mode=" << mode << " and file prefix `" << file_pfx;
|
||||
|
||||
if (mode & 4) {
|
||||
ton::BlockSeqno start_seqno = info1->blk_id.seqno();
|
||||
ton::BlockSeqno end_seqno = info2->blk_id.seqno();
|
||||
block::ValidatorSet validator_set = *info1->vset;
|
||||
if (info1->config->get_config_param(28)->get_hash() != info2->config->get_config_param(28)->get_hash()) {
|
||||
LOG(ERROR) << "Catchain validator config (28) changed between the first and the last block";
|
||||
return;
|
||||
}
|
||||
auto catchain_config = std::make_unique<block::CatchainValidatorsConfig>(
|
||||
block::Config::unpack_catchain_validators_config(info1->config->get_config_param(28)));
|
||||
load_validator_shard_shares(
|
||||
start_seqno, end_seqno, std::move(validator_set), std::move(catchain_config),
|
||||
[=, this, info1 = std::move(info1),
|
||||
info2 = std::move(info2)](td::Result<std::map<td::Bits256, td::uint64>> R) mutable {
|
||||
if (R.is_error()) {
|
||||
LOG(ERROR) << "failed to load validator shard shares: " << R.move_as_error();
|
||||
} else {
|
||||
continue_check_validator_load4(std::move(info1), std::move(info2), mode, file_pfx, R.move_as_ok());
|
||||
}
|
||||
});
|
||||
} else {
|
||||
continue_check_validator_load4(std::move(info1), std::move(info2), mode, std::move(file_pfx), {});
|
||||
}
|
||||
}
|
||||
|
||||
void TestNode::continue_check_validator_load4(std::unique_ptr<TestNode::ValidatorLoadInfo> info1,
|
||||
std::unique_ptr<TestNode::ValidatorLoadInfo> info2, int mode,
|
||||
std::string file_pfx,
|
||||
std::map<td::Bits256, td::uint64> exact_shard_shares) {
|
||||
LOG(INFO) << "continue_check_validator_load4 for blocks " << info1->blk_id.to_str() << " and "
|
||||
<< info2->blk_id.to_str() << " with mode=" << mode << " and file prefix `" << file_pfx;
|
||||
if (info1->created_total.first <= 0 || info2->created_total.first <= 0) {
|
||||
LOG(ERROR) << "no total created blocks statistics";
|
||||
return;
|
||||
}
|
||||
td::TerminalIO::out() << "total: (" << info1->created_total.first << "," << info1->created_total.second << ") -> ("
|
||||
<< info2->created_total.first << "," << info2->created_total.second << ")\n";
|
||||
auto x = info2->created_total.first - info1->created_total.first;
|
||||
auto y = info2->created_total.second - info1->created_total.second;
|
||||
td::int64 xs = 0, ys = 0;
|
||||
if (x <= 0 || y < 0 || (x | y) >= (1u << 31)) {
|
||||
LOG(ERROR) << "impossible situation: zero or no blocks created: " << x << " masterchain blocks, " << y
|
||||
<< " shardchain blocks";
|
||||
auto created_total_mc = info2->created_total.first - info1->created_total.first;
|
||||
auto created_total_bc = info2->created_total.second - info1->created_total.second;
|
||||
td::int64 created_mc_sum = 0, created_bc_sum = 0;
|
||||
if (created_total_mc <= 0 || created_total_bc < 0 || (created_total_mc | created_total_bc) >= (1U << 31)) {
|
||||
LOG(ERROR) << "impossible situation: zero or no blocks created: " << created_total_mc << " masterchain blocks, "
|
||||
<< created_total_bc << " shardchain blocks";
|
||||
return;
|
||||
}
|
||||
std::pair<int, int> created_total{(int)x, (int)y};
|
||||
int count = info1->vset->total;
|
||||
CHECK(info2->vset->total == count);
|
||||
CHECK((int)info1->created.size() == count);
|
||||
CHECK((int)info2->created.size() == count);
|
||||
std::vector<std::pair<int, int>> d;
|
||||
d.reserve(count);
|
||||
std::vector<std::pair<int, int>> vals_created;
|
||||
vals_created.reserve(count);
|
||||
for (int i = 0; i < count; i++) {
|
||||
auto x1 = info2->created[i].first - info1->created[i].first;
|
||||
auto y1 = info2->created[i].second - info1->created[i].second;
|
||||
if (x1 < 0 || y1 < 0 || (x1 | y1) >= (1u << 31)) {
|
||||
LOG(ERROR) << "impossible situation: validator #" << i << " created a negative amount of blocks: " << x1
|
||||
<< " masterchain blocks, " << y1 << " shardchain blocks";
|
||||
auto created_mc = info2->created[i].first - info1->created[i].first;
|
||||
auto created_bc = info2->created[i].second - info1->created[i].second;
|
||||
if (created_mc < 0 || created_bc < 0 || (created_mc | created_bc) >= (1u << 31)) {
|
||||
LOG(ERROR) << "impossible situation: validator #" << i << " created a negative amount of blocks: " << created_mc
|
||||
<< " masterchain blocks, " << created_bc << " shardchain blocks";
|
||||
return;
|
||||
}
|
||||
xs += x1;
|
||||
ys += y1;
|
||||
d.emplace_back((int)x1, (int)y1);
|
||||
td::TerminalIO::out() << "val #" << i << ": created (" << x1 << "," << y1 << ") ; was (" << info1->created[i].first
|
||||
<< "," << info1->created[i].second << ")\n";
|
||||
created_mc_sum += created_mc;
|
||||
created_bc_sum += created_bc;
|
||||
vals_created.emplace_back((int)created_mc, (int)created_bc);
|
||||
td::TerminalIO::out() << "val #" << i << ": created (" << created_mc << "," << created_bc << ") ; was ("
|
||||
<< info1->created[i].first << "," << info1->created[i].second << ")\n";
|
||||
}
|
||||
if (xs != x || ys != y) {
|
||||
LOG(ERROR) << "cannot account for all blocks created: total is (" << x << "," << y
|
||||
<< "), but the sum for all validators is (" << xs << "," << ys << ")";
|
||||
if (created_mc_sum != created_total_mc || created_bc_sum != created_total_bc) {
|
||||
LOG(ERROR) << "cannot account for all blocks created: total is (" << created_total_mc << "," << created_total_bc
|
||||
<< "), but the sum for all validators is (" << created_mc_sum << "," << created_bc_sum << ")";
|
||||
return;
|
||||
}
|
||||
td::TerminalIO::out() << "total: (" << x << "," << y << ")\n";
|
||||
td::TerminalIO::out() << "total: (" << created_total_mc << "," << created_total_bc << ")\n";
|
||||
auto ccfg = block::Config::unpack_catchain_validators_config(info2->config->get_config_param(28));
|
||||
auto ccfg_old = block::Config::unpack_catchain_validators_config(info1->config->get_config_param(28));
|
||||
if (ccfg.shard_val_num != ccfg_old.shard_val_num || ccfg.shard_val_num <= 0) {
|
||||
|
@ -3707,57 +3834,216 @@ void TestNode::continue_check_validator_load3(std::unique_ptr<TestNode::Validato
|
|||
<< ", or is not positive";
|
||||
return;
|
||||
}
|
||||
int shard_count = ccfg.shard_val_num, main_count = info2->vset->main;
|
||||
if (info1->vset->main != main_count || main_count <= 0) {
|
||||
LOG(ERROR) << "masterchain validator group size changed from " << info1->vset->main << " to " << main_count
|
||||
int shard_vals = ccfg.shard_val_num, master_vals = info2->vset->main;
|
||||
if (info1->vset->main != master_vals || master_vals <= 0) {
|
||||
LOG(ERROR) << "masterchain validator group size changed from " << info1->vset->main << " to " << master_vals
|
||||
<< ", or is not positive";
|
||||
return;
|
||||
}
|
||||
int cnt = 0, cnt_ok = 0;
|
||||
double chunk_size = ccfg.shard_val_lifetime / 3. / shard_count;
|
||||
block::MtCarloComputeShare shard_share(shard_count, info2->vset->export_scaled_validator_weights());
|
||||
|
||||
bool use_exact_shard_share = mode & 4;
|
||||
int proofs_cnt = 0, proofs_cnt_ok = 0;
|
||||
double chunk_size = ccfg.shard_val_lifetime / 3. / shard_vals;
|
||||
|
||||
std::vector<double> mtc_shard_share;
|
||||
if (use_exact_shard_share) {
|
||||
LOG(INFO) << "using exact shard shares";
|
||||
td::uint64 exact_shard_shares_sum = 0;
|
||||
for (auto& [_, count] : exact_shard_shares) {
|
||||
exact_shard_shares_sum += count;
|
||||
}
|
||||
if ((td::int64)exact_shard_shares_sum != shard_vals * created_bc_sum) {
|
||||
LOG(ERROR) << "unexpected total shard shares: blocks=" << created_bc_sum << ", shard_vals=" << shard_vals
|
||||
<< ", expected_sum=" << shard_vals * created_bc_sum << ", found=" << exact_shard_shares_sum;
|
||||
return;
|
||||
}
|
||||
} else {
|
||||
LOG(INFO) << "using MtCarloComputeShare";
|
||||
block::MtCarloComputeShare mtc(shard_vals, info2->vset->export_scaled_validator_weights());
|
||||
if (!mtc.is_ok()) {
|
||||
LOG(ERROR) << "failed to compute shard shares";
|
||||
return;
|
||||
}
|
||||
mtc_shard_share.resize(count);
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
mtc_shard_share[i] = mtc[i];
|
||||
}
|
||||
}
|
||||
|
||||
auto validators = info1->vset->export_validator_set();
|
||||
for (int i = 0; i < count; i++) {
|
||||
int x1 = d[i].first, y1 = d[i].second;
|
||||
bool is_masterchain_validator = i < main_count;
|
||||
double xe = (is_masterchain_validator ? (double)xs / main_count : 0);
|
||||
double ye = shard_share[i] * (double)ys / shard_count;
|
||||
int created_mc = vals_created[i].first, created_bc = vals_created[i].second;
|
||||
bool is_masterchain_validator = i < master_vals;
|
||||
|
||||
double expected_created_mc = (is_masterchain_validator ? (double)created_mc_sum / master_vals : 0);
|
||||
double prob_mc = create_prob(created_mc, .9 * expected_created_mc);
|
||||
|
||||
double expected_created_bc, prob_bc;
|
||||
if (use_exact_shard_share) {
|
||||
expected_created_bc = (double)exact_shard_shares[validators[i].key.as_bits256()] / shard_vals;
|
||||
prob_bc = create_prob(created_bc, .9 * expected_created_bc);
|
||||
} else {
|
||||
expected_created_bc = mtc_shard_share[i] * (double)created_bc_sum / shard_vals;
|
||||
prob_bc = shard_create_prob(created_bc, .9 * expected_created_bc, chunk_size);
|
||||
}
|
||||
|
||||
td::Bits256 pk = info2->vset->list[i].pubkey.as_bits256();
|
||||
double p1 = create_prob(x1, .9 * xe), p2 = shard_create_prob(y1, .9 * ye, chunk_size);
|
||||
td::TerminalIO::out() << "val #" << i << ": pubkey " << pk.to_hex() << ", blocks created (" << x1 << "," << y1
|
||||
<< "), expected (" << xe << "," << ye << "), probabilities " << p1 << " and " << p2 << "\n";
|
||||
if ((is_masterchain_validator ? p1 : p2) < .00001) {
|
||||
td::TerminalIO::out() << "val #" << i << ": pubkey " << pk.to_hex() << ", blocks created (" << created_mc << ","
|
||||
<< created_bc << "), expected (" << expected_created_mc << "," << expected_created_bc
|
||||
<< "), probabilities " << prob_mc << " and " << prob_bc << "\n";
|
||||
if ((is_masterchain_validator ? prob_mc : prob_bc) < .00001) {
|
||||
LOG(ERROR) << "validator #" << i << " with pubkey " << pk.to_hex()
|
||||
<< " : serious misbehavior detected: created less than 90% of the expected amount of blocks with "
|
||||
"probability 99.999% : created ("
|
||||
<< x1 << "," << y1 << "), expected (" << xe << "," << ye << ") masterchain/shardchain blocks\n";
|
||||
<< created_mc << "," << created_bc << "), expected (" << expected_created_mc << ","
|
||||
<< expected_created_bc << ") masterchain/shardchain blocks\n";
|
||||
if (mode & 2) {
|
||||
auto st = write_val_create_proof(*info1, *info2, i, true, file_pfx, ++cnt);
|
||||
auto st = write_val_create_proof(*info1, *info2, i, true, file_pfx, ++proofs_cnt);
|
||||
if (st.is_error()) {
|
||||
LOG(ERROR) << "cannot create proof: " << st.move_as_error();
|
||||
} else {
|
||||
cnt_ok++;
|
||||
proofs_cnt_ok++;
|
||||
}
|
||||
}
|
||||
} else if ((is_masterchain_validator ? p1 : p2) < .005) {
|
||||
} else if ((is_masterchain_validator ? prob_mc : prob_bc) < .005) {
|
||||
LOG(ERROR) << "validator #" << i << " with pubkey " << pk.to_hex()
|
||||
<< " : moderate misbehavior detected: created less than 90% of the expected amount of blocks with "
|
||||
"probability 99.5% : created ("
|
||||
<< x1 << "," << y1 << "), expected (" << xe << "," << ye << ") masterchain/shardchain blocks\n";
|
||||
<< created_mc << "," << created_bc << "), expected (" << expected_created_mc << ","
|
||||
<< expected_created_bc << ") masterchain/shardchain blocks\n";
|
||||
if ((mode & 3) == 2) {
|
||||
auto st = write_val_create_proof(*info1, *info2, i, false, file_pfx, ++cnt);
|
||||
auto st = write_val_create_proof(*info1, *info2, i, false, file_pfx, ++proofs_cnt);
|
||||
if (st.is_error()) {
|
||||
LOG(ERROR) << "cannot create proof: " << st.move_as_error();
|
||||
} else {
|
||||
cnt_ok++;
|
||||
proofs_cnt_ok++;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if (cnt > 0) {
|
||||
LOG(INFO) << cnt_ok << " out of " << cnt << " proofs written to " << file_pfx << "-*.boc";
|
||||
if (proofs_cnt > 0) {
|
||||
LOG(INFO) << proofs_cnt_ok << " out of " << proofs_cnt << " proofs written to " << file_pfx << "-*.boc";
|
||||
}
|
||||
}
|
||||
|
||||
void TestNode::load_validator_shard_shares(ton::BlockSeqno start_seqno, ton::BlockSeqno end_seqno,
|
||||
block::ValidatorSet validator_set,
|
||||
std::unique_ptr<block::CatchainValidatorsConfig> catchain_config,
|
||||
td::Promise<std::map<td::Bits256, td::uint64>> promise) {
|
||||
CHECK(start_seqno <= end_seqno);
|
||||
LOG(INFO) << "loading shard shares from mc blocks " << start_seqno << ".." << end_seqno << " ("
|
||||
<< end_seqno - start_seqno + 1 << " blocks)";
|
||||
auto state = std::make_shared<LoadValidatorShardSharesState>();
|
||||
state->start_seqno = start_seqno;
|
||||
state->end_seqno = end_seqno;
|
||||
state->validator_set = std::move(validator_set);
|
||||
state->catchain_config = std::move(catchain_config);
|
||||
state->shard_configs.resize(end_seqno - start_seqno + 1);
|
||||
state->promise = std::move(promise);
|
||||
load_validator_shard_shares_cont(std::move(state));
|
||||
}
|
||||
|
||||
void TestNode::load_validator_shard_shares_cont(std::shared_ptr<LoadValidatorShardSharesState> state) {
|
||||
if (!state->promise) {
|
||||
return;
|
||||
}
|
||||
if (state->loaded % 100 == 0) {
|
||||
LOG(INFO) << "loaded " << state->loaded << "/" << state->shard_configs.size() << " mc blocks";
|
||||
}
|
||||
while (state->cur_idx < state->shard_configs.size() && state->pending < 8) {
|
||||
load_block_shard_configuration(state->start_seqno + state->cur_idx,
|
||||
[this, state, idx = state->cur_idx](td::Result<block::ShardConfig> R) mutable {
|
||||
if (R.is_error()) {
|
||||
state->promise.set_error(R.move_as_error());
|
||||
state->promise = {};
|
||||
} else {
|
||||
state->shard_configs[idx] = R.move_as_ok();
|
||||
--state->pending;
|
||||
++state->loaded;
|
||||
load_validator_shard_shares_cont(std::move(state));
|
||||
}
|
||||
});
|
||||
++state->pending;
|
||||
++state->cur_idx;
|
||||
}
|
||||
|
||||
if (state->loaded != state->shard_configs.size()) {
|
||||
return;
|
||||
}
|
||||
LOG(INFO) << "loaded all " << state->shard_configs.size() << " mc blocks, computing shard shares";
|
||||
std::map<td::Bits256, td::uint64> result;
|
||||
try {
|
||||
for (size_t idx = 0; idx + 1 < state->shard_configs.size(); ++idx) {
|
||||
block::ShardConfig& shards1 = state->shard_configs[idx];
|
||||
block::ShardConfig& shards2 = state->shard_configs[idx + 1];
|
||||
|
||||
// Compute validator groups, see ValidatorManagerImpl::update_shards
|
||||
auto process_shard = [&](ton::ShardIdFull shard, ton::BlockSeqno first_seqno) {
|
||||
auto desc2 = shards2.get_shard_hash(shard);
|
||||
if (desc2.is_null() || desc2->seqno() < first_seqno) {
|
||||
return;
|
||||
}
|
||||
td::uint32 blocks_count = desc2->seqno() - first_seqno + 1;
|
||||
ton::CatchainSeqno cc_seqno = shards1.get_shard_cc_seqno(shard);
|
||||
auto val_set =
|
||||
block::ConfigInfo::do_compute_validator_set(*state->catchain_config, shard, state->validator_set, cc_seqno);
|
||||
for (const auto &val : val_set) {
|
||||
result[val.key.as_bits256()] += blocks_count;
|
||||
}
|
||||
};
|
||||
|
||||
for (const ton::BlockId& id : shards1.get_shard_hash_ids()) {
|
||||
ton::ShardIdFull shard = id.shard_full();
|
||||
auto desc = shards1.get_shard_hash(shard);
|
||||
CHECK(desc.not_null());
|
||||
if (desc->before_split()) {
|
||||
ton::ShardIdFull l_shard = shard_child(shard, true);
|
||||
ton::ShardIdFull r_shard = shard_child(shard, false);
|
||||
process_shard(l_shard, desc->seqno() + 1);
|
||||
process_shard(r_shard, desc->seqno() + 1);
|
||||
} else if (desc->before_merge()) {
|
||||
if (is_right_child(shard)) {
|
||||
continue;
|
||||
}
|
||||
ton::ShardIdFull sibling_shard = shard_sibling(shard);
|
||||
auto sibling_desc = shards1.get_shard_hash(sibling_shard);
|
||||
CHECK(sibling_desc.not_null());
|
||||
ton::ShardIdFull p_shard = shard_parent(shard);
|
||||
process_shard(p_shard, std::max(desc->seqno(), sibling_desc->seqno()) + 1);
|
||||
} else {
|
||||
process_shard(shard, desc->seqno() + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (vm::VmError &e) {
|
||||
state->promise.set_error(e.as_status("cannot parse shard hashes: "));
|
||||
return;
|
||||
}
|
||||
state->promise.set_value(std::move(result));
|
||||
}
|
||||
|
||||
void TestNode::load_block_shard_configuration(ton::BlockSeqno seqno, td::Promise<block::ShardConfig> promise) {
|
||||
lookup_block(
|
||||
ton::ShardIdFull{ton::masterchainId}, 1, seqno,
|
||||
[this, promise = std::move(promise)](td::Result<BlockHdrInfo> R) mutable {
|
||||
TRY_RESULT_PROMISE(promise, res, std::move(R));
|
||||
auto b = ton::serialize_tl_object(
|
||||
ton::create_tl_object<ton::lite_api::liteServer_getAllShardsInfo>(ton::create_tl_lite_block_id(res.blk_id)),
|
||||
true);
|
||||
envelope_send_query(std::move(b), [this, promise = std::move(promise)](td::Result<td::BufferSlice> R) mutable {
|
||||
TRY_RESULT_PROMISE(promise, data, std::move(R));
|
||||
TRY_RESULT_PROMISE(promise, f, ton::fetch_tl_object<ton::lite_api::liteServer_allShardsInfo>(data, true));
|
||||
TRY_RESULT_PROMISE(promise, root, vm::std_boc_deserialize(f->data_));
|
||||
block::ShardConfig sh_conf;
|
||||
if (!sh_conf.unpack(load_cell_slice_ref(root))) {
|
||||
promise.set_error(td::Status::Error("cannot extract shard block list from shard configuration"));
|
||||
} else {
|
||||
promise.set_value(std::move(sh_conf));
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
bool compute_punishment_default(int interval, bool severe, td::RefInt256& fine, unsigned& fine_part) {
|
||||
if (interval <= 1000) {
|
||||
return false; // no punishments for less than 1000 seconds
|
||||
|
|
|
@ -282,6 +282,26 @@ class TestNode : public td::actor::Actor {
|
|||
void continue_check_validator_load3(std::unique_ptr<ValidatorLoadInfo> info1,
|
||||
std::unique_ptr<ValidatorLoadInfo> info2, int mode = 0,
|
||||
std::string file_pfx = "");
|
||||
void continue_check_validator_load4(std::unique_ptr<ValidatorLoadInfo> info1,
|
||||
std::unique_ptr<ValidatorLoadInfo> info2, int mode, std::string file_pfx,
|
||||
std::map<td::Bits256, td::uint64> exact_shard_shares);
|
||||
|
||||
struct LoadValidatorShardSharesState {
|
||||
ton::BlockSeqno start_seqno;
|
||||
ton::BlockSeqno end_seqno;
|
||||
block::ValidatorSet validator_set;
|
||||
std::unique_ptr<block::CatchainValidatorsConfig> catchain_config;
|
||||
std::vector<block::ShardConfig> shard_configs;
|
||||
td::uint32 cur_idx = 0, pending = 0, loaded = 0;
|
||||
td::Promise<std::map<td::Bits256, td::uint64>> promise;
|
||||
};
|
||||
void load_validator_shard_shares(ton::BlockSeqno start_seqno, ton::BlockSeqno end_seqno,
|
||||
block::ValidatorSet validator_set,
|
||||
std::unique_ptr<block::CatchainValidatorsConfig> catchain_config,
|
||||
td::Promise<std::map<td::Bits256, td::uint64>> promise);
|
||||
void load_validator_shard_shares_cont(std::shared_ptr<LoadValidatorShardSharesState> state);
|
||||
void load_block_shard_configuration(ton::BlockSeqno seqno, td::Promise<block::ShardConfig> promise);
|
||||
|
||||
td::Status write_val_create_proof(ValidatorLoadInfo& info1, ValidatorLoadInfo& info2, int idx, bool severe,
|
||||
std::string file_pfx, int cnt);
|
||||
bool load_creator_stats(std::unique_ptr<ValidatorLoadInfo> load_to,
|
||||
|
@ -307,6 +327,13 @@ class TestNode : public td::actor::Actor {
|
|||
unsigned refs, td::Bits256 chash, std::string filename);
|
||||
bool get_msg_queue_sizes();
|
||||
void got_msg_queue_sizes(ton::tl_object_ptr<ton::lite_api::liteServer_outMsgQueueSizes> f);
|
||||
bool get_dispatch_queue_info(ton::BlockIdExt block_id);
|
||||
bool get_dispatch_queue_info_cont(ton::BlockIdExt block_id, bool first, td::Bits256 after_addr);
|
||||
void got_dispatch_queue_info(ton::BlockIdExt block_id,
|
||||
ton::tl_object_ptr<ton::lite_api::liteServer_dispatchQueueInfo> info);
|
||||
bool get_dispatch_queue_messages(ton::BlockIdExt block_id, ton::WorkchainId wc, ton::StdSmcAddress addr,
|
||||
ton::LogicalTime lt, bool one_account);
|
||||
void got_dispatch_queue_messages(ton::tl_object_ptr<ton::lite_api::liteServer_dispatchQueueMessages> msgs);
|
||||
bool cache_cell(Ref<vm::Cell> cell);
|
||||
bool list_cached_cells() const;
|
||||
bool dump_cached_cell(td::Slice hash_pfx, td::Slice type_name = {});
|
||||
|
|
|
@ -68,7 +68,7 @@ td::Status BroadcastSimple::run_checks() {
|
|||
|
||||
td::Status BroadcastSimple::distribute() {
|
||||
auto B = serialize();
|
||||
auto nodes = overlay_->get_neighbours(3);
|
||||
auto nodes = overlay_->get_neighbours(overlay_->propagate_broadcast_to());
|
||||
|
||||
auto manager = overlay_->overlay_manager();
|
||||
for (auto &n : nodes) {
|
||||
|
@ -115,7 +115,8 @@ td::Status BroadcastSimple::run() {
|
|||
return run_continue();
|
||||
}
|
||||
|
||||
td::Status BroadcastSimple::create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, tl_object_ptr<ton_api::overlay_broadcast> broadcast) {
|
||||
td::Status BroadcastSimple::create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id,
|
||||
tl_object_ptr<ton_api::overlay_broadcast> broadcast) {
|
||||
auto src = PublicKey{broadcast->src_};
|
||||
auto data_hash = sha256_bits256(broadcast->data_.as_slice());
|
||||
auto broadcast_hash = compute_broadcast_id(src, data_hash, broadcast->flags_);
|
||||
|
|
Some files were not shown because too many files have changed in this diff.