Mirror of https://github.com/ton-blockchain/ton (synced 2025-02-12 11:12:16 +00:00)

Merge pull request #1099 from ton-blockchain/testnet

Merge developer branch

Commit 140320b0db: 130 changed files with 5291 additions and 979 deletions
@ -7,7 +7,7 @@ jobs:
strategy:
fail-fast: false
matrix:
os: [ubuntu-20.04, ubuntu-22.04]
os: [ubuntu-20.04, ubuntu-22.04, ubuntu-24.04]
runs-on: ${{ matrix.os }}
steps:

@ -21,7 +21,7 @@ jobs:
sudo apt-get update
sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev liblz4-dev libjemalloc-dev
- name: Install clang-16
if: matrix.os != 'ubuntu-24.04'
run: |
wget https://apt.llvm.org/llvm.sh
chmod +x llvm.sh
@ -19,7 +19,7 @@ jobs:
- name: Build TON WASM artifacts
run: |
cd assembly/wasm
cp assembly/wasm/fift-func-wasm-build-ubuntu.sh .
chmod +x fift-func-wasm-build-ubuntu.sh
./fift-func-wasm-build-ubuntu.sh -a
37 .github/workflows/docker-ubuntu-image.yml (vendored)
@ -20,22 +20,49 @@ jobs:
submodules: 'recursive'

- name: Set up QEMU
uses: docker/setup-qemu-action@v1
uses: docker/setup-qemu-action@v3

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v3

- name: Login to GitHub Container Registry
uses: docker/login-action@v1
uses: docker/login-action@v3
with:
registry: ${{ env.REGISTRY }}
username: ${{ github.repository_owner }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Build and export to Docker
uses: docker/build-push-action@v6
with:
load: true
context: ./
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test

- name: Test
run: |
docker run --rm -e "TEST=1" ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:test

- name: Get next tag
id: tag
run: |
git fetch --all --tags
git tag -l
NEW_TAG=v$(date +'%Y.%m')
FOUND=$(git tag -l | grep $NEW_TAG | wc -l)
if [ $FOUND -eq 0 ]; then
echo "TAG=$NEW_TAG" >> $GITHUB_OUTPUT
else
echo "TAG=$NEW_TAG-$FOUND" >> $GITHUB_OUTPUT
fi

- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
uses: docker/build-push-action@v6
with:
platforms: linux/amd64,linux/arm64
push: true
context: ./
tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
tags: |
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest
${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${{ steps.tag.outputs.TAG }}
8 .github/workflows/ton-x86-64-windows.yml (vendored)
@ -9,7 +9,7 @@ defaults:
jobs:
build:
runs-on: windows-2022
runs-on: windows-2019
steps:
- name: Get Current OS version

@ -23,9 +23,9 @@ jobs:
- name: Build TON
run: |
copy assembly\native\build-windows-github.bat .
copy assembly\native\build-windows.bat .
build-windows-github.bat Enterprise
copy assembly\native\build-windows-github-2019.bat .
copy assembly\native\build-windows-2019.bat .
build-windows-github-2019.bat Enterprise

- name: Upload artifacts
uses: actions/upload-artifact@master
@ -37,12 +37,14 @@ if (NOT DEFINED SODIUM_USE_STATIC_LIBS)
option(SODIUM_USE_STATIC_LIBS "enable to statically link against sodium" OFF)
endif()
if(NOT (SODIUM_USE_STATIC_LIBS EQUAL SODIUM_USE_STATIC_LIBS_LAST))
unset(sodium_LIBRARY CACHE)
unset(SODIUM_LIBRARY_DEBUG CACHE)
unset(SODIUM_LIBRARY_RELEASE CACHE)
unset(sodium_DLL_DEBUG CACHE)
unset(sodium_DLL_RELEASE CACHE)
set(SODIUM_USE_STATIC_LIBS_LAST ${SODIUM_USE_STATIC_LIBS} CACHE INTERNAL "internal change tracking variable")
if (NOT SODIUM_LIBRARY_RELEASE)
unset(sodium_LIBRARY CACHE)
unset(SODIUM_LIBRARY_DEBUG CACHE)
unset(SODIUM_LIBRARY_RELEASE CACHE)
unset(sodium_DLL_DEBUG CACHE)
unset(sodium_DLL_RELEASE CACHE)
set(SODIUM_USE_STATIC_LIBS_LAST ${SODIUM_USE_STATIC_LIBS} CACHE INTERNAL "internal change tracking variable")
endif()
endif()

@ -295,4 +297,4 @@ else()
)
endif()
endif()
endif()
endif()
@ -539,6 +539,9 @@ target_link_libraries(test-ton-collator overlay tdutils tdactor adnl tl_api dht
add_executable(test-http test/test-http.cpp)
target_link_libraries(test-http PRIVATE tonhttp)

add_executable(test-emulator test/test-td-main.cpp emulator/test/emulator-tests.cpp)
target_link_libraries(test-emulator PRIVATE emulator)

get_directory_property(HAS_PARENT PARENT_DIRECTORY)
if (HAS_PARENT)
set(ALL_TEST_SOURCE

@ -570,6 +573,7 @@ add_test(test-cells test-cells ${TEST_OPTIONS})
add_test(test-smartcont test-smartcont)
add_test(test-net test-net)
add_test(test-actors test-tdactor)
add_test(test-emulator test-emulator)

#BEGIN tonlib
add_test(test-tdutils test-tdutils)
17 Changelog.md
@ -1,3 +1,20 @@
## 2024.08 Update

1. Introduction of dispatch queues, message envelopes with transaction chain metadata, and explicitly stored msg_queue size, which will be activated by `Config8.version >= 8` and the new `Config8.capabilities` bits: `capStoreOutMsgQueueSize`, `capMsgMetadata`, `capDeferMessages`.
2. A number of changes to the transaction executor which will be activated for `Config8.version >= 8`:
   - Check mode on invalid `action_send_msg`. Ignore the action if the `IGNORE_ERROR` (+2) bit is set, bounce if the `BOUNCE_ON_FAIL` (+16) bit is set (see the sketch after this list).
   - Slightly change random seed generation to fix the mix of `addr_rewrite` and `addr`.
   - Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent.
   - Allow unfreeze through external messages.
   - Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages.
3. A few issues with broadcasts were fixed: stop on receiving the last piece, response to AdnlMessageCreateChannel.
4. A number of fixes and improvements for the emulator and tonlib: correct work with config_addr, not-accepted externals, bounces, debug ops gas consumption, added version and c5 dump, fixed tonlib crashes.
5. Added new flags and commands to the node, in particular `--fast-state-serializer`, `getcollatoroptionsjson`, `setcollatoroptionsjson`.

Besides the work of the core team, this update is based on the efforts of @krigga (emulator), the stonfi team, in particular @dbaranovstonfi and @hey-researcher (emulator), and @loeul, @xiaoxianBoy, @simlecode (typos in comments and docs).
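As a rough illustration of the new `action_send_msg` mode handling described in item 2, here is a minimal sketch. The constant names and the `handle_invalid_send_msg` helper are hypothetical; only the +2 / +16 bit semantics come from the changelog entry, so this is not the actual executor code.

```cpp
#include <cstdint>

// Hypothetical names; only the +2 / +16 bit values come from the changelog entry above.
constexpr std::uint32_t IGNORE_ERROR   = 2;   // skip this action if it is invalid
constexpr std::uint32_t BOUNCE_ON_FAIL = 16;  // bounce the message if the send fails

enum class SendResult { Skipped, Bounced, ActionPhaseFailed };

// Sketch of the decision taken for an invalid action_send_msg under Config8.version >= 8.
SendResult handle_invalid_send_msg(std::uint32_t mode) {
  if (mode & IGNORE_ERROR) {
    return SendResult::Skipped;           // also counted in skipped_actions
  }
  if (mode & BOUNCE_ON_FAIL) {
    return SendResult::Bounced;           // message is bounced back instead of failing the phase
  }
  return SendResult::ActionPhaseFailed;   // default: the whole action phase fails
}
```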
## 2024.06 Update

1. Make Jemalloc default allocator

34 Dockerfile
@ -1,10 +1,15 @@
FROM ubuntu:22.04 as builder
FROM ubuntu:22.04 AS builder
RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev pkg-config autoconf automake libtool libjemalloc-dev && \
rm -rf /var/lib/apt/lists/*
ENV CC clang
ENV CXX clang++
ENV CCACHE_DISABLE 1
DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev pkg-config autoconf automake libtool libjemalloc-dev lsb-release software-properties-common gnupg

RUN wget https://apt.llvm.org/llvm.sh && \
chmod +x llvm.sh && \
./llvm.sh 16 all && \
rm -rf /var/lib/apt/lists/*

ENV CC=/usr/bin/clang-16
ENV CXX=/usr/bin/clang++-16
ENV CCACHE_DISABLE=1

WORKDIR /
RUN mkdir ton

@ -13,17 +18,16 @@ WORKDIR /ton
COPY ./ ./

RUN mkdir build && \
cd build && \
cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DTON_USE_JEMALLOC=ON .. && \
ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client
cd build && \
cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DTON_USE_JEMALLOC=ON .. && \
ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client

FROM ubuntu:22.04
RUN apt-get update && \
apt-get install -y wget libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev libjemalloc-dev && \
apt-get install -y wget curl libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev liblz4-dev libjemalloc-dev htop net-tools netcat iptraf-ng jq tcpdump pv plzip && \
rm -rf /var/lib/apt/lists/*

RUN mkdir -p /var/ton-work/db && \
mkdir -p /var/ton-work/db/static
RUN mkdir -p /var/ton-work/db /var/ton-work/scripts

COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon /usr/local/bin/
COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon-cli /usr/local/bin/

@ -33,7 +37,7 @@ COPY --from=builder /ton/build/validator-engine-console/validator-engine-console
COPY --from=builder /ton/build/utils/generate-random-id /usr/local/bin/

WORKDIR /var/ton-work/db
COPY ./docker/init.sh ./docker/control.template ./
RUN chmod +x init.sh
COPY ./docker/init.sh ./docker/control.template /var/ton-work/scripts/
RUN chmod +x /var/ton-work/scripts/init.sh

ENTRYPOINT ["/var/ton-work/db/init.sh"]
ENTRYPOINT ["/var/ton-work/scripts/init.sh"]
@ -91,7 +91,7 @@ td::Status AdnlInboundConnection::process_custom_packet(td::BufferSlice &data, b
auto F = fetch_tl_object<ton_api::tcp_authentificate>(data.clone(), true);
if (F.is_ok()) {
if (nonce_.size() > 0 || !remote_id_.is_zero()) {
return td::Status::Error(ErrorCode::protoviolation, "duplicate authentificate");
return td::Status::Error(ErrorCode::protoviolation, "duplicate authenticate");
}
auto f = F.move_as_ok();
nonce_ = td::SecureString{f->nonce_.size() + 256};
@ -504,6 +504,12 @@ void AdnlPeerPairImpl::create_channel(pubkeys::Ed25519 pub, td::uint32 date) {

void AdnlPeerPairImpl::process_message(const adnlmessage::AdnlMessageCreateChannel &message) {
create_channel(message.key(), message.date());
if (respond_to_channel_create_after_.is_in_past()) {
respond_to_channel_create_after_ = td::Timestamp::in(td::Random::fast(1.0, 2.0));
std::vector<OutboundAdnlMessage> messages;
messages.emplace_back(adnlmessage::AdnlMessageNop{}, 0);
send_messages(std::move(messages));
}
}

void AdnlPeerPairImpl::process_message(const adnlmessage::AdnlMessageConfirmChannel &message) {
@ -214,6 +214,7 @@ class AdnlPeerPairImpl : public AdnlPeerPair {
pubkeys::Ed25519 channel_pub_;
td::int32 channel_pk_date_;
td::actor::ActorOwn<AdnlChannel> channel_;
td::Timestamp respond_to_channel_create_after_;

td::uint64 in_seqno_ = 0;
td::uint64 out_seqno_ = 0;
@ -1,4 +1,5 @@
pipeline {

agent none
stages {
stage('Run Builds') {

@ -12,7 +13,7 @@ pipeline {
sh '''
cp assembly/native/build-ubuntu-shared.sh .
chmod +x build-ubuntu-shared.sh
./build-ubuntu-shared.sh -t -a
./build-ubuntu-shared.sh -a
'''
sh '''
cd artifacts

@ -31,7 +32,7 @@ pipeline {
sh '''
cp assembly/nix/build-linux-x86-64-nix.sh .
chmod +x build-linux-x86-64-nix.sh
./build-linux-x86-64-nix.sh -t
./build-linux-x86-64-nix.sh
'''
sh '''
cd artifacts

@ -50,7 +51,7 @@ pipeline {
sh '''
cp assembly/native/build-ubuntu-shared.sh .
chmod +x build-ubuntu-shared.sh
./build-ubuntu-shared.sh -t -a
./build-ubuntu-shared.sh -a
'''
sh '''
cd artifacts

@ -69,7 +70,7 @@ pipeline {
sh '''
cp assembly/nix/build-linux-arm64-nix.sh .
chmod +x build-linux-arm64-nix.sh
./build-linux-arm64-nix.sh -t
./build-linux-arm64-nix.sh
'''
sh '''
cd artifacts

@ -88,7 +89,7 @@ pipeline {
sh '''
cp assembly/native/build-macos-shared.sh .
chmod +x build-macos-shared.sh
./build-macos-shared.sh -t -a
./build-macos-shared.sh -a
'''
sh '''
cd artifacts

@ -107,7 +108,7 @@ pipeline {
sh '''
cp assembly/nix/build-macos-nix.sh .
chmod +x build-macos-nix.sh
./build-macos-nix.sh -t
./build-macos-nix.sh
'''
sh '''
cd artifacts

@ -126,7 +127,7 @@ pipeline {
sh '''
cp assembly/native/build-macos-shared.sh .
chmod +x build-macos-shared.sh
./build-macos-shared.sh -t -a
./build-macos-shared.sh -a
'''
sh '''
cd artifacts

@ -145,7 +146,7 @@ pipeline {
sh '''
cp assembly/nix/build-macos-nix.sh .
chmod +x build-macos-nix.sh
./build-macos-nix.sh -t
./build-macos-nix.sh
'''
sh '''
cd artifacts

@ -164,7 +165,7 @@ pipeline {
sh '''
cp assembly/native/build-macos-shared.sh .
chmod +x build-macos-shared.sh
./build-macos-shared.sh -t -a
./build-macos-shared.sh -a
'''
sh '''
cd artifacts

@ -182,7 +183,7 @@ pipeline {
timeout(time: 180, unit: 'MINUTES') {
bat '''
copy assembly\\native\\build-windows.bat .
build-windows.bat -t
build-windows.bat
'''
bat '''
cd artifacts

@ -218,7 +219,7 @@ pipeline {
steps {
timeout(time: 180, unit: 'MINUTES') {
sh '''
cd assembly/wasm
cp assembly/wasm/fift-func-wasm-build-ubuntu.sh .
chmod +x fift-func-wasm-build-ubuntu.sh
./fift-func-wasm-build-ubuntu.sh -a
'''
@ -158,7 +158,7 @@ if [ "$with_tests" = true ]; then
|
|||
http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \
|
||||
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \
|
||||
test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \
|
||||
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state
|
||||
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
else
|
||||
ninja storage-daemon storage-daemon-cli blockchain-explorer \
|
||||
|
|
|
@ -86,7 +86,7 @@ if [ "$with_tests" = true ]; then
|
|||
http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \
|
||||
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \
|
||||
test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \
|
||||
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state
|
||||
test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
else
|
||||
ninja storage-daemon storage-daemon-cli blockchain-explorer \
|
||||
|
|
|
@ -150,7 +150,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
|||
adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \
|
||||
test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \
|
||||
test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \
|
||||
test-fec test-tddb test-db test-validator-session-state
|
||||
test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
else
|
||||
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
||||
|
|
|
@ -58,7 +58,7 @@ ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
|||
adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \
|
||||
test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \
|
||||
test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \
|
||||
test-fec test-tddb test-db test-validator-session-state
|
||||
test-fec test-tddb test-db test-validator-session-state test-emulator
|
||||
test $? -eq 0 || { echo "Can't compile ton"; exit 1; }
|
||||
else
|
||||
ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \
|
||||
|
|
221 assembly/native/build-windows-2019.bat (Normal file)
@ -0,0 +1,221 @@
REM execute this script inside elevated (Run as Administrator) console "x64 Native Tools Command Prompt for VS 2019"

echo off

echo Installing chocolatey windows package manager...
@"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin"
choco -?
IF %errorlevel% NEQ 0 (
echo Can't install chocolatey
exit /b %errorlevel%
)

choco feature enable -n allowEmptyChecksums

echo Installing pkgconfiglite...
choco install -y pkgconfiglite
IF %errorlevel% NEQ 0 (
echo Can't install pkgconfiglite
exit /b %errorlevel%
)

echo Installing ninja...
choco install -y ninja
IF %errorlevel% NEQ 0 (
echo Can't install ninja
exit /b %errorlevel%
)

if not exist "zlib" (
git clone https://github.com/madler/zlib.git
cd zlib
git checkout v1.3.1
cd contrib\vstudio\vc14
msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142
IF %errorlevel% NEQ 0 (
echo Can't install zlib
exit /b %errorlevel%
)
cd ..\..\..\..
) else (
echo Using zlib...
)

if not exist "lz4" (
git clone https://github.com/lz4/lz4.git
cd lz4
git checkout v1.9.4
cd build\VS2017\liblz4
msbuild liblz4.vcxproj /p:Configuration=Release /p:platform=x64 -p:PlatformToolset=v142
IF %errorlevel% NEQ 0 (
echo Can't install lz4
exit /b %errorlevel%
)
cd ..\..\..\..
) else (
echo Using lz4...
)

if not exist "secp256k1" (
git clone https://github.com/bitcoin-core/secp256k1.git
cd secp256k1
git checkout v0.3.2
cmake -G "Visual Studio 16 2019" -A x64 -S . -B build -DSECP256K1_ENABLE_MODULE_RECOVERY=ON -DBUILD_SHARED_LIBS=OFF
IF %errorlevel% NEQ 0 (
echo Can't configure secp256k1
exit /b %errorlevel%
)
cmake --build build --config Release
IF %errorlevel% NEQ 0 (
echo Can't install secp256k1
exit /b %errorlevel%
)
cd ..
) else (
echo Using secp256k1...
)
if not exist "libsodium" (
curl --retry 5 --retry-delay 10 -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip
IF %errorlevel% NEQ 0 (
echo Can't download libsodium
exit /b %errorlevel%
)
unzip libsodium-1.0.18-stable-msvc.zip
) else (
echo Using libsodium...
)
if not exist "openssl-3.1.4" (
curl -Lo openssl-3.1.4.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-3.1.4.zip
IF %errorlevel% NEQ 0 (
echo Can't download OpenSSL
exit /b %errorlevel%
)
unzip -q openssl-3.1.4.zip
) else (
echo Using openssl...
)

if not exist "libmicrohttpd-0.9.77-w32-bin" (
curl -Lo libmicrohttpd-0.9.77-w32-bin.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip
IF %errorlevel% NEQ 0 (
echo Can't download libmicrohttpd
exit /b %errorlevel%
)
unzip -q libmicrohttpd-0.9.77-w32-bin.zip
) else (
echo Using libmicrohttpd...
)

if not exist "readline-5.0-1-lib" (
curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip
IF %errorlevel% NEQ 0 (
echo Can't download readline
exit /b %errorlevel%
)
unzip -q -d readline-5.0-1-lib readline-5.0-1-lib.zip
) else (
echo Using readline...
)

set root=%cd%
echo %root%
set SODIUM_DIR=%root%\libsodium

mkdir build
cd build
cmake -GNinja -DCMAKE_BUILD_TYPE=Release ^
-DPORTABLE=1 ^
-DSODIUM_USE_STATIC_LIBS=1 ^
-DSECP256K1_FOUND=1 ^
-DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include ^
-DSECP256K1_LIBRARY=%root%\secp256k1\build\src\Release\libsecp256k1.lib ^
-DLZ4_FOUND=1 ^
-DLZ4_INCLUDE_DIRS=%root%\lz4\lib ^
-DLZ4_LIBRARIES=%root%\lz4\build\VS2017\liblz4\bin\x64_Release\liblz4_static.lib ^
-DMHD_FOUND=1 ^
-DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib ^
-DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static ^
-DZLIB_FOUND=1 ^
-DZLIB_INCLUDE_DIR=%root%\zlib ^
-DZLIB_LIBRARIES=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib ^
-DOPENSSL_FOUND=1 ^
-DOPENSSL_INCLUDE_DIR=%root%\openssl-3.1.4\x64\include ^
-DOPENSSL_CRYPTO_LIBRARY=%root%\openssl-3.1.4\x64\lib\libcrypto_static.lib ^
-DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include ^
-DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib ^
-DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" ..
IF %errorlevel% NEQ 0 (
echo Can't configure TON
exit /b %errorlevel%
)

IF "%1"=="-t" (
ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^
tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^
test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^
test-fec test-tddb test-db test-validator-session-state test-emulator
IF %errorlevel% NEQ 0 (
echo Can't compile TON
exit /b %errorlevel%
)
) else (
ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^
tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator
IF %errorlevel% NEQ 0 (
echo Can't compile TON
exit /b %errorlevel%
)
)

copy validator-engine\validator-engine.exe test
IF %errorlevel% NEQ 0 (
echo validator-engine.exe does not exist
exit /b %errorlevel%
)

IF "%1"=="-t" (
echo Running tests...
REM ctest -C Release --output-on-failure -E "test-catchain|test-actors|test-validator-session-state"
ctest -C Release --output-on-failure -E "test-bigint" --timeout 1800
IF %errorlevel% NEQ 0 (
echo Some tests failed
exit /b %errorlevel%
)
)

echo Creating artifacts...
cd ..
mkdir artifacts
mkdir artifacts\smartcont
mkdir artifacts\lib

for %%I in (build\storage\storage-daemon\storage-daemon.exe ^
build\storage\storage-daemon\storage-daemon-cli.exe ^
build\blockchain-explorer\blockchain-explorer.exe ^
build\crypto\fift.exe ^
build\crypto\tlbc.exe ^
build\crypto\func.exe ^
build\crypto\create-state.exe ^
build\validator-engine-console\validator-engine-console.exe ^
build\tonlib\tonlib-cli.exe ^
build\tonlib\tonlibjson.dll ^
build\http\http-proxy.exe ^
build\rldp-http-proxy\rldp-http-proxy.exe ^
build\dht-server\dht-server.exe ^
build\lite-client\lite-client.exe ^
build\validator-engine\validator-engine.exe ^
build\utils\generate-random-id.exe ^
build\utils\json2tlo.exe ^
build\adnl\adnl-proxy.exe ^
build\emulator\emulator.dll) do (strip -g %%I & copy %%I artifacts\)
xcopy /e /k /h /i crypto\smartcont artifacts\smartcont
xcopy /e /k /h /i crypto\fift\lib artifacts\lib
2 assembly/native/build-windows-github-2019.bat (Normal file)
@ -0,0 +1,2 @@
call "C:\Program Files (x86)\Microsoft Visual Studio\2019\%1\VC\Auxiliary\Build\vcvars64.bat"
call build-windows-2019.bat -t

@ -1,2 +1,2 @@
call "C:\Program Files\Microsoft Visual Studio\2022\%1\VC\Auxiliary\Build\vcvars64.bat"
call build-windows.bat -t
call build-windows.bat -t
@ -161,7 +161,7 @@ tonlib-cli validator-engine lite-client pow-miner validator-engine-console gener
json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^
test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^
test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^
test-fec test-tddb test-db test-validator-session-state
test-fec test-tddb test-db test-validator-session-state test-emulator
IF %errorlevel% NEQ 0 (
echo Can't compile TON
exit /b %errorlevel%
@ -1,5 +1,3 @@
# The script builds funcfift compiler to WASM

# Execute these prerequisites first
# sudo apt update
# sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev \

@ -11,10 +9,12 @@
# sudo ./llvm.sh 16 all

with_artifacts=false
scratch_new=false

while getopts 'a' flag; do
while getopts 'af' flag; do
case "${flag}" in
a) with_artifacts=true ;;
f) scratch_new=true ;;
*) break
;;
esac

@ -24,108 +24,139 @@ export CC=$(which clang-16)
export CXX=$(which clang++-16)
export CCACHE_DISABLE=1

cd ../..
rm -rf openssl zlib emsdk secp256k1 libsodium build
echo `pwd`
if [ "$scratch_new" = true ]; then
echo Compiling openssl zlib lz4 emsdk secp256k1 libsodium emsdk ton
rm -rf openssl zlib lz4 emsdk secp256k1 libsodium build
fi

git clone https://github.com/openssl/openssl.git
cd openssl
git checkout checkout openssl-3.1.4
./config
make -j16
OPENSSL_DIR=`pwd`
cd ..

git clone https://github.com/madler/zlib.git
cd zlib
ZLIB_DIR=`pwd`
cd ..
if [ ! -d "openssl" ]; then
git clone https://github.com/openssl/openssl.git
cd openssl
git checkout openssl-3.1.4
./config
make -j16
OPENSSL_DIR=`pwd`
cd ..
else
OPENSSL_DIR=`pwd`/openssl
echo Using compiled openssl at $OPENSSL_DIR
fi

git clone https://github.com/lz4/lz4.git
cd lz4
LZ4_DIR=`pwd`
cd ..
if [ ! -d "build" ]; then
mkdir build
cd build
cmake -GNinja -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_STANDARD=17 \
-DOPENSSL_FOUND=1 \
-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \
-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \
-DTON_USE_ABSEIL=OFF ..

git clone https://github.com/bitcoin-core/secp256k1.git
cd secp256k1
./autogen.sh
SECP256K1_DIR=`pwd`
cd ..
test $? -eq 0 || { echo "Can't configure TON build"; exit 1; }
ninja fift smc-envelope
test $? -eq 0 || { echo "Can't compile fift "; exit 1; }
rm -rf *
cd ..
else
echo cleaning build...
rm -rf build/*
fi

git clone https://github.com/jedisct1/libsodium --branch stable
cd libsodium
SODIUM_DIR=`pwd`
cd ..
if [ ! -d "emsdk" ]; then
git clone https://github.com/emscripten-core/emsdk.git
echo
echo Using cloned emsdk
fi

mkdir build
cd build
cmake -GNinja -DCMAKE_BUILD_TYPE=Release \
-DCMAKE_CXX_STANDARD=17 \
-DOPENSSL_FOUND=1 \
-DOPENSSL_ROOT_DIR=$OPENSSL_DIR \
-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \
-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \
-DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.so \
-DTON_USE_ABSEIL=OFF ..

test $? -eq 0 || { echo "Can't configure TON build"; exit 1; }

ninja fift smc-envelope

test $? -eq 0 || { echo "Can't compile fift "; exit 1; }

rm -rf *

cd ..

git clone https://github.com/emscripten-core/emsdk.git
cd emsdk
./emsdk install 3.1.19
./emsdk activate 3.1.19
EMSDK_DIR=`pwd`
ls $EMSDK_DIR

. $EMSDK_DIR/emsdk_env.sh
export CC=$(which emcc)
export CXX=$(which em++)
export CCACHE_DISABLE=1

cd ../openssl
cd ..

make clean
emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test
sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile
sed -i 's/-ldl//g' Makefile
sed -i 's/-O3/-Os/g' Makefile
emmake make depend
emmake make -j16
test $? -eq 0 || { echo "Can't compile OpenSSL with emmake "; exit 1; }
if [ ! -f "openssl/openssl_em" ]; then
cd openssl
make clean
emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test
sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile
sed -i 's/-ldl//g' Makefile
sed -i 's/-O3/-Os/g' Makefile
emmake make depend
emmake make -j16
test $? -eq 0 || { echo "Can't compile OpenSSL with emmake "; exit 1; }
touch openssl_em
cd ..
else
echo Using compiled openssl with emscripten
fi

cd ../zlib
if [ ! -d "zlib" ]; then
git clone https://github.com/madler/zlib.git
cd zlib
git checkout v1.3.1
ZLIB_DIR=`pwd`
emconfigure ./configure --static
emmake make -j16
test $? -eq 0 || { echo "Can't compile zlib with emmake "; exit 1; }
cd ..
else
ZLIB_DIR=`pwd`/zlib
echo Using compiled zlib with emscripten at $ZLIB_DIR
fi

emconfigure ./configure --static
emmake make -j16
test $? -eq 0 || { echo "Can't compile zlib with emmake "; exit 1; }
if [ ! -d "lz4" ]; then
git clone https://github.com/lz4/lz4.git
cd lz4
git checkout v1.9.4
LZ4_DIR=`pwd`
emmake make -j16
test $? -eq 0 || { echo "Can't compile lz4 with emmake "; exit 1; }
cd ..
else
LZ4_DIR=`pwd`/lz4
echo Using compiled lz4 with emscripten at $LZ4_DIR
fi

cd ../lz4
emmake make -j16
test $? -eq 0 || { echo "Can't compile lz4 with emmake "; exit 1; }
if [ ! -d "secp256k1" ]; then
git clone https://github.com/bitcoin-core/secp256k1.git
cd secp256k1
git checkout v0.3.2
./autogen.sh
SECP256K1_DIR=`pwd`
emconfigure ./configure --enable-module-recovery
emmake make -j16
test $? -eq 0 || { echo "Can't compile secp256k1 with emmake "; exit 1; }
cd ..
else
SECP256K1_DIR=`pwd`/secp256k1
echo Using compiled secp256k1 with emscripten at $SECP256K1_DIR
fi

cd ../secp256k1
if [ ! -d "libsodium" ]; then
git clone https://github.com/jedisct1/libsodium
cd libsodium
git checkout 1.0.18-RELEASE
SODIUM_DIR=`pwd`
emconfigure ./configure --disable-ssp
emmake make -j16
test $? -eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; }
cd ..
else
SODIUM_DIR=`pwd`/libsodium
echo Using compiled libsodium with emscripten at $SODIUM_DIR
fi

emconfigure ./configure --enable-module-recovery
emmake make -j16
test $? -eq 0 || { echo "Can't compile secp256k1 with emmake "; exit 1; }
cd build

cd ../libsodium

emconfigure ./configure --disable-ssp
emmake make -j16
test $? -eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; }

cd ../build

emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \
emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
-DZLIB_FOUND=1 \
-DZLIB_LIBRARIES=$ZLIB_DIR/libz.a \
-DZLIB_INCLUDE_DIR=$ZLIB_DIR \

@ -133,18 +164,15 @@ emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \
-DLZ4_LIBRARIES=$LZ4_DIR/lib/liblz4.a \
-DLZ4_INCLUDE_DIRS=$LZ4_DIR/lib \
-DOPENSSL_FOUND=1 \
-DOPENSSL_ROOT_DIR=$OPENSSL_DIR \
-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \
-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a \
-DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a \
-DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake \
-DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" \
-DSECP256K1_FOUND=1 \
-DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include \
-DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a \
-DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include \
-DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \
-DSODIUM_USE_STATIC_LIBS=ON ..
..

test $? -eq 0 || { echo "Can't configure TON with emmake "; exit 1; }
cp -R ../crypto/smartcont ../crypto/fift/lib crypto
@ -19,6 +19,6 @@
namespace ton {

// See doc/GlobalVersions.md
const int SUPPORTED_VERSION = 7;
const int SUPPORTED_VERSION = 8;

}
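For context, the constant above is the highest global version this node build understands. The sketch below shows, under the assumption of a hypothetical `config_global_version` value read from `Config8`, how a feature listed in the 2024.08 changelog entry would be gated; it is an illustration, not code from the repository.

```cpp
#include <cassert>

// Mirrors the constant above after this change; everything else here is illustrative.
constexpr int SUPPORTED_VERSION = 8;

// Hypothetical helper: dispatch queues (capDeferMessages etc.) only activate once the
// network configuration announces version >= 8, which this build is now able to process.
bool dispatch_queue_enabled(int config_global_version) {
  assert(config_global_version <= SUPPORTED_VERSION);  // node must understand the active version
  return config_global_version >= 8;
}
```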
@ -358,7 +358,8 @@ target_link_libraries(test-ed25519-crypto PUBLIC ton_crypto)

add_library(fift-lib STATIC ${FIFT_SOURCE})
target_include_directories(fift-lib PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}>)
target_link_libraries(fift-lib PUBLIC ton_crypto ton_db tdutils ton_block)
target_link_libraries(fift-lib PUBLIC ton_crypto tdutils ton_block)

if (USE_EMSCRIPTEN)
target_link_options(fift-lib PRIVATE -fexceptions)
target_compile_options(fift-lib PRIVATE -fexceptions)
@ -813,19 +813,45 @@ int IntermediateAddress::get_size(const vm::CellSlice& cs) const {
const IntermediateAddress t_IntermediateAddress;

bool MsgEnvelope::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.validate_skip(ops, cs, weak) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.validate_skip(ops, cs, weak) // next_addr:IntermediateAddress
&& t_Grams.validate_skip(ops, cs, weak) // fwd_fee_remaining:Grams
&& t_Ref_Message.validate_skip(ops, cs, weak); // msg:^Message
switch (get_tag(cs)) {
case 4:
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.validate_skip(ops, cs, weak) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.validate_skip(ops, cs, weak) // next_addr:IntermediateAddress
&& t_Grams.validate_skip(ops, cs, weak) // fwd_fee_remaining:Grams
&& t_Ref_Message.validate_skip(ops, cs, weak); // msg:^Message
case 5:
return cs.fetch_ulong(4) == 5 // msg_envelope_v2#5
&& t_IntermediateAddress.validate_skip(ops, cs, weak) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.validate_skip(ops, cs, weak) // next_addr:IntermediateAddress
&& t_Grams.validate_skip(ops, cs, weak) // fwd_fee_remaining:Grams
&& t_Ref_Message.validate_skip(ops, cs, weak) // msg:^Message
&& Maybe<UInt>(64).validate_skip(ops, cs, weak) // emitted_lt:(Maybe uint64)
&& Maybe<gen::MsgMetadata>().validate_skip(ops, cs, weak); // metadata:(Maybe MsgMetadata)
default:
return false;
}
}

bool MsgEnvelope::skip(vm::CellSlice& cs) const {
return cs.advance(4) // msg_envelope#4
&& t_IntermediateAddress.skip(cs) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.skip(cs) // next_addr:IntermediateAddress
&& t_Grams.skip(cs) // fwd_fee_remaining:Grams
&& t_Ref_Message.skip(cs); // msg:^Message
switch (get_tag(cs)) {
case 4:
return cs.advance(4) // msg_envelope#4
&& t_IntermediateAddress.skip(cs) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.skip(cs) // next_addr:IntermediateAddress
&& t_Grams.skip(cs) // fwd_fee_remaining:Grams
&& t_Ref_Message.skip(cs); // msg:^Message
case 5:
return cs.advance(4) // msg_envelope_v2#5
&& t_IntermediateAddress.skip(cs) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.skip(cs) // next_addr:IntermediateAddress
&& t_Grams.skip(cs) // fwd_fee_remaining:Grams
&& t_Ref_Message.skip(cs) // msg:^Message
&& Maybe<UInt>(64).skip(cs) // emitted_lt:(Maybe uint64)
&& Maybe<gen::MsgMetadata>().skip(cs); // metadata:(Maybe MsgMetadata)
default:
return false;
}
}

bool MsgEnvelope::extract_fwd_fees_remaining(vm::CellSlice& cs) const {

@ -833,34 +859,101 @@ bool MsgEnvelope::extract_fwd_fees_remaining(vm::CellSlice& cs) const {
}

bool MsgEnvelope::unpack(vm::CellSlice& cs, MsgEnvelope::Record& data) const {
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.fetch_to(cs, data.cur_addr) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_to(cs, data.next_addr) // next_addr:IntermediateAddress
&& t_Grams.fetch_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams
&& cs.fetch_ref_to(data.msg); // msg:^Message
switch (get_tag(cs)) {
case 4:
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.fetch_to(cs, data.cur_addr) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_to(cs, data.next_addr) // next_addr:IntermediateAddress
&& t_Grams.fetch_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams
&& cs.fetch_ref_to(data.msg); // msg:^Message
case 5:
return cs.fetch_ulong(4) == 5 // msg_envelope_v2#5
&& t_IntermediateAddress.fetch_to(cs, data.cur_addr) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_to(cs, data.next_addr) // next_addr:IntermediateAddress
&& t_Grams.fetch_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams
&& cs.fetch_ref_to(data.msg) // msg:^Message
&& Maybe<UInt>(64).skip(cs) // emitted_lt:(Maybe uint64)
&& Maybe<gen::MsgMetadata>().skip(cs); // metadata:(Maybe MsgMetadata)
default:
return false;
}
}

bool MsgEnvelope::unpack(vm::CellSlice& cs, MsgEnvelope::Record_std& data) const {
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.fetch_regular(cs, data.cur_addr) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_regular(cs, data.next_addr) // next_addr:IntermediateAddress
&& t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams
&& cs.fetch_ref_to(data.msg); // msg:^Message
data.emitted_lt = {};
data.metadata = {};
switch (get_tag(cs)) {
case 4:
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.fetch_regular(cs, data.cur_addr) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_regular(cs, data.next_addr) // next_addr:IntermediateAddress
&& t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams
&& cs.fetch_ref_to(data.msg); // msg:^Message
case 5: {
bool with_metadata, with_emitted_lt;
return cs.fetch_ulong(4) == 5 // msg_envelope_v2#5
&& t_IntermediateAddress.fetch_regular(cs, data.cur_addr) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_regular(cs, data.next_addr) // next_addr:IntermediateAddress
&& t_Grams.as_integer_skip_to(cs, data.fwd_fee_remaining) // fwd_fee_remaining:Grams
&& cs.fetch_ref_to(data.msg) // msg:^Message
&& cs.fetch_bool_to(with_emitted_lt) &&
(!with_emitted_lt || cs.fetch_uint_to(64, data.emitted_lt.value_force())) // emitted_lt:(Maybe uint64)
&& cs.fetch_bool_to(with_metadata) &&
(!with_metadata || data.metadata.value_force().unpack(cs)); // metadata:(Maybe MsgMetadata)
}
default:
return false;
}
}

bool MsgEnvelope::unpack_std(vm::CellSlice& cs, int& cur_a, int& nhop_a, Ref<vm::Cell>& msg) const {
return cs.fetch_ulong(4) == 4 // msg_envelope#4
&& t_IntermediateAddress.fetch_regular(cs, cur_a) // cur_addr:IntermediateAddress
&& t_IntermediateAddress.fetch_regular(cs, nhop_a) // next_addr:IntermediateAddress
&& cs.fetch_ref_to(msg);
bool MsgEnvelope::pack(vm::CellBuilder& cb, const Record_std& data) const {
bool v2 = (bool)data.metadata || (bool)data.emitted_lt;
if (!(cb.store_long_bool(v2 ? 5 : 4, 4) && // msg_envelope#4 / msg_envelope_v2#5
cb.store_long_bool(data.cur_addr, 8) && // cur_addr:IntermediateAddress
cb.store_long_bool(data.next_addr, 8) && // next_addr:IntermediateAddress
t_Grams.store_integer_ref(cb, data.fwd_fee_remaining) && // fwd_fee_remaining:Grams
cb.store_ref_bool(data.msg))) { // msg:^Message
return false;
}
if (v2) {
if (!(cb.store_bool_bool((bool)data.emitted_lt) &&
(!data.emitted_lt || cb.store_long_bool(data.emitted_lt.value(), 64)))) { // emitted_lt:(Maybe uint64)
return false;
}
if (!(cb.store_bool_bool((bool)data.metadata) &&
(!data.metadata || data.metadata.value().pack(cb)))) { // metadata:(Maybe MsgMetadata)
return false;
}
}
return true;
}

bool MsgEnvelope::get_created_lt(const vm::CellSlice& cs, unsigned long long& created_lt) const {
bool MsgEnvelope::pack_cell(td::Ref<vm::Cell>& cell, const Record_std& data) const {
vm::CellBuilder cb;
return pack(cb, data) && cb.finalize_to(cell);
}

bool MsgEnvelope::get_emitted_lt(const vm::CellSlice& cs, unsigned long long& emitted_lt) const {
// Emitted lt is emitted_lt from MsgEnvelope (if present), otherwise created_lt
if (!cs.size_refs()) {
return false;
}
if (get_tag(cs) == 5) {
vm::CellSlice cs2 = cs;
// msg_envelope_v2#5 cur_addr:IntermediateAddress
// next_addr:IntermediateAddress fwd_fee_remaining:Grams
// msg:^(Message Any) emitted_lt:(Maybe uint64) ...
bool have_emitted_lt;
if (!(cs2.skip_first(4) && t_IntermediateAddress.skip(cs2) && t_IntermediateAddress.skip(cs2) &&
t_Grams.skip(cs2) && t_Ref_Message.skip(cs2) && cs2.fetch_bool_to(have_emitted_lt))) {
return false;
}
if (have_emitted_lt) {
return cs2.fetch_ulong_bool(64, emitted_lt);
}
}
auto msg_cs = load_cell_slice(cs.prefetch_ref());
return t_Message.get_created_lt(msg_cs, created_lt);
return t_Message.get_created_lt(msg_cs, emitted_lt);
}

const MsgEnvelope t_MsgEnvelope;
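To make the relationship between the two envelope formats concrete, here is a minimal usage sketch of the `pack_cell`/`unpack` pair shown above. It is an illustration under assumptions, not code from the repository: the include path, the literal values and the `envelope_roundtrip` helper are invented, and only the field names and method signatures come from the diff. Setting `emitted_lt` is what pushes `pack` to choose the `msg_envelope_v2#5` form.

```cpp
#include "block/block-parse.h"   // assumed include path for MsgEnvelope / t_MsgEnvelope

// Minimal usage sketch (not collator code): build a Record_std, force the v2 form by
// setting emitted_lt, serialise it to a cell and read it back.
bool envelope_roundtrip(td::Ref<vm::Cell> msg_cell) {
  block::tlb::MsgEnvelope::Record_std rec;
  rec.cur_addr = 0;                                  // cur_addr:IntermediateAddress (regular)
  rec.next_addr = 0;                                 // next_addr:IntermediateAddress (regular)
  rec.fwd_fee_remaining = td::make_refint(1000000);  // fwd_fee_remaining:Grams
  rec.msg = std::move(msg_cell);                     // msg:^Message
  rec.emitted_lt.value_force() = 123456789;          // presence of emitted_lt selects msg_envelope_v2#5

  td::Ref<vm::Cell> env_cell;
  if (!block::tlb::t_MsgEnvelope.pack_cell(env_cell, rec)) {
    return false;                                    // packing failed
  }
  auto cs = vm::load_cell_slice(env_cell);
  block::tlb::MsgEnvelope::Record_std decoded;
  return block::tlb::t_MsgEnvelope.unpack(cs, decoded) &&
         decoded.emitted_lt && decoded.emitted_lt.value() == 123456789;
}
```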
@ -1692,6 +1785,15 @@ bool InMsg::skip(vm::CellSlice& cs) const {
&& cs.advance(64) // transaction_id:uint64
&& t_Grams.skip(cs) // fwd_fee:Grams
&& t_RefCell.skip(cs); // proof_delivered:^Cell
case msg_import_deferred_fin:
return cs.advance(5) // msg_import_deferred_fin$00100
&& t_Ref_MsgEnvelope.skip(cs) // in_msg:^MsgEnvelope
&& t_Ref_Transaction.skip(cs) // transaction:^Transaction
&& t_Grams.skip(cs); // fwd_fee:Grams
case msg_import_deferred_tr:
return cs.advance(5) // msg_import_deferred_tr$00101
&& t_Ref_MsgEnvelope.skip(cs) // in_msg:^MsgEnvelope
&& t_Ref_MsgEnvelope.skip(cs); // out_msg:^MsgEnvelope
}
return false;
}

@ -1734,12 +1836,22 @@ bool InMsg::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
&& cs.advance(64) // transaction_id:uint64
&& t_Grams.validate_skip(ops, cs, weak) // fwd_fee:Grams
&& t_RefCell.validate_skip(ops, cs, weak); // proof_delivered:^Cell
case msg_import_deferred_fin:
return cs.advance(5) // msg_import_deferred_fin$00100
&& t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // in_msg:^MsgEnvelope
&& t_Ref_Transaction.validate_skip(ops, cs, weak) // transaction:^Transaction
&& t_Grams.validate_skip(ops, cs, weak); // fwd_fee:Grams
case msg_import_deferred_tr:
return cs.advance(5) // msg_import_deferred_tr$00101
&& t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // in_msg:^MsgEnvelope
&& t_Ref_MsgEnvelope.validate_skip(ops, cs, weak); // out_msg:^MsgEnvelope
}
return false;
}

bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const {
switch (get_tag(cs)) {
int tag = get_tag(cs);
switch (tag) {
case msg_import_ext: // inbound external message
return t_ImportFees.null_value(cb); // external messages have no value and no import fees
case msg_import_ihr: // IHR-forwarded internal message to its final destination

@ -1765,8 +1877,9 @@ bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const {
&& t_CurrencyCollection.null_value(cb); // value_imported := 0
}
return false;
case msg_import_fin: // internal message delivered to its final destination in this block
if (cs.advance(3) && cs.size_refs() >= 2) {
case msg_import_fin: // internal message delivered to its final destination in this block
case msg_import_deferred_fin: // internal message from DispatchQueue to its final destination in this block
if (cs.advance(tag == msg_import_fin ? 3 : 5) && cs.size_refs() >= 2) {
auto msg_env_cs = load_cell_slice(cs.fetch_ref());
MsgEnvelope::Record in_msg;
td::RefInt256 fwd_fee, fwd_fee_remaining, value_grams, ihr_fee;

@ -1787,13 +1900,14 @@ bool InMsg::get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const {
msg_info.value.write()); // value_imported = msg.value + msg.ihr_fee + fwd_fee_remaining
}
return false;
case msg_import_tr: // transit internal message
if (cs.advance(3) && cs.size_refs() >= 2) {
case msg_import_tr: // transit internal message
case msg_import_deferred_tr: // internal message from DispatchQueue to OutMsgQueue
if (cs.advance(tag == msg_import_tr ? 3 : 5) && cs.size_refs() >= 2) {
auto msg_env_cs = load_cell_slice(cs.fetch_ref());
MsgEnvelope::Record in_msg;
td::RefInt256 transit_fee, fwd_fee_remaining, value_grams, ihr_fee;
td::RefInt256 transit_fee = td::zero_refint(), fwd_fee_remaining, value_grams, ihr_fee;
if (!(t_MsgEnvelope.unpack(msg_env_cs, in_msg) && cs.fetch_ref().not_null() &&
t_Grams.as_integer_skip_to(cs, transit_fee) &&
(tag == msg_import_deferred_tr || t_Grams.as_integer_skip_to(cs, transit_fee)) &&
(fwd_fee_remaining = t_Grams.as_integer(in_msg.fwd_fee_remaining)).not_null() &&
cmp(transit_fee, fwd_fee_remaining) <= 0)) {
return false;

@ -1871,6 +1985,14 @@ bool OutMsg::skip(vm::CellSlice& cs) const {
return cs.advance(3) // msg_export_tr_req$111
&& t_Ref_MsgEnvelope.skip(cs) // out_msg:^MsgEnvelope
&& RefTo<InMsg>{}.skip(cs); // imported:^InMsg
case msg_export_new_defer:
return cs.advance(5) // msg_export_new_defer$10100
&& t_Ref_MsgEnvelope.skip(cs) // out_msg:^MsgEnvelope
&& t_Ref_Transaction.skip(cs); // transaction:^Transaction
case msg_export_deferred_tr:
return cs.advance(5) // msg_export_deferred_tr$10101
&& t_Ref_MsgEnvelope.skip(cs) // out_msg:^MsgEnvelope
&& RefTo<InMsg>{}.skip(cs); // imported:^InMsg
}
return false;
}

@ -1910,12 +2032,21 @@ bool OutMsg::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
return cs.advance(3) // msg_export_tr_req$111
&& t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // out_msg:^MsgEnvelope
&& RefTo<InMsg>{}.validate_skip(ops, cs, weak); // imported:^InMsg
case msg_export_new_defer:
return cs.advance(5) // msg_export_new_defer$10100
&& t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // out_msg:^MsgEnvelope
&& t_Ref_Transaction.validate_skip(ops, cs, weak); // transaction:^Transaction
case msg_export_deferred_tr:
return cs.advance(5) // msg_export_deferred_tr$10101
&& t_Ref_MsgEnvelope.validate_skip(ops, cs, weak) // out_msg:^MsgEnvelope
&& RefTo<InMsg>{}.validate_skip(ops, cs, weak); // imported:^InMsg
}
return false;
}

bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const {
switch (get_tag(cs)) {
auto tag = get_tag(cs);
switch (tag) {
case msg_export_ext: // external outbound message carries no value
if (cs.have(3, 2)) {
return t_CurrencyCollection.null_value(cb);

@ -1929,10 +2060,13 @@ bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const {
return cs.have(4 + 63, 1) && t_CurrencyCollection.null_value(cb);
case msg_export_deq_short: // dequeueing record for outbound message, no exported value
return cs.have(4 + 256 + 32 + 64 + 64) && t_CurrencyCollection.null_value(cb);
case msg_export_new: // newly-generated outbound internal message, queued
case msg_export_tr: // transit internal message, queued
case msg_export_tr_req: // transit internal message, re-queued from this shardchain
if (cs.advance(3) && cs.size_refs() >= 2) {
case msg_export_new: // newly-generated outbound internal message, queued
case msg_export_tr: // transit internal message, queued
case msg_export_tr_req: // transit internal message, re-queued from this shardchain
case msg_export_new_defer: // newly-generated outbound internal message, deferred
case msg_export_deferred_tr: // internal message from DispatchQueue, queued
int tag_len = (tag == msg_export_new_defer || tag == msg_export_deferred_tr) ? 5 : 3;
if (cs.advance(tag_len) && cs.size_refs() >= 2) {
auto msg_env_cs = load_cell_slice(cs.fetch_ref());
MsgEnvelope::Record out_msg;
if (!(cs.fetch_ref().not_null() && t_MsgEnvelope.unpack(msg_env_cs, out_msg))) {

@ -1954,12 +2088,12 @@ bool OutMsg::get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const {
return false;
}

bool OutMsg::get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) const {
bool OutMsg::get_emitted_lt(vm::CellSlice& cs, unsigned long long& emitted_lt) const {
switch (get_tag(cs)) {
case msg_export_ext:
if (cs.have(3, 1)) {
auto msg_cs = load_cell_slice(cs.prefetch_ref());
return t_Message.get_created_lt(msg_cs, created_lt);
return t_Message.get_created_lt(msg_cs, emitted_lt);
} else {
return false;
}

@ -1970,9 +2104,11 @@ bool OutMsg::get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) c
case msg_export_deq_short:
case msg_export_deq_imm:
case msg_export_tr_req:
case msg_export_new_defer:
case msg_export_deferred_tr:
if (cs.have(3, 1)) {
auto out_msg_cs = load_cell_slice(cs.prefetch_ref());
return t_MsgEnvelope.get_created_lt(out_msg_cs, created_lt);
return t_MsgEnvelope.get_emitted_lt(out_msg_cs, emitted_lt);
} else {
return false;
}
@ -2003,26 +2139,53 @@ bool Aug_OutMsgQueue::eval_empty(vm::CellBuilder& cb) const {

bool Aug_OutMsgQueue::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const {
Ref<vm::Cell> msg_env;
unsigned long long created_lt;
return cs.fetch_ref_to(msg_env) && t_MsgEnvelope.get_created_lt(load_cell_slice(std::move(msg_env)), created_lt) &&
cb.store_ulong_rchk_bool(created_lt, 64);
unsigned long long emitted_lt;
return cs.fetch_ref_to(msg_env) && t_MsgEnvelope.get_emitted_lt(load_cell_slice(std::move(msg_env)), emitted_lt) &&
cb.store_ulong_rchk_bool(emitted_lt, 64);
}

bool Aug_DispatchQueue::eval_fork(vm::CellBuilder& cb, vm::CellSlice& left_cs, vm::CellSlice& right_cs) const {
unsigned long long x, y;
return left_cs.fetch_ulong_bool(64, x) && right_cs.fetch_ulong_bool(64, y) &&
cb.store_ulong_rchk_bool(std::min(x, y), 64);
}

bool Aug_DispatchQueue::eval_empty(vm::CellBuilder& cb) const {
return cb.store_long_bool(0, 64);
}

bool Aug_DispatchQueue::eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const {
Ref<vm::Cell> messages_root;
if (!cs.fetch_maybe_ref(messages_root)) {
return false;
}
vm::Dictionary messages{std::move(messages_root), 64};
td::BitArray<64> key_buffer;
td::uint64 key;
if (messages.get_minmax_key(key_buffer.bits(), 64).is_null()) {
key = (td::uint64)-1;
} else {
key = key_buffer.to_ulong();
}
return cb.store_long_bool(key, 64);
}

const Aug_OutMsgQueue aug_OutMsgQueue;
const Aug_DispatchQueue aug_DispatchQueue;
const OutMsgQueue t_OutMsgQueue;

const ProcessedUpto t_ProcessedUpto;
const HashmapE t_ProcessedInfo{96, t_ProcessedUpto};
const HashmapE t_IhrPendingInfo{256, t_uint128};

// _ out_queue:OutMsgQueue proc_info:ProcessedInfo = OutMsgQueueInfo;
// _ out_queue:OutMsgQueue proc_info:ProcessedInfo extra:(Maybe OutMsgQueueExtra) = OutMsgQueueInfo;
bool OutMsgQueueInfo::skip(vm::CellSlice& cs) const {
return t_OutMsgQueue.skip(cs) && t_ProcessedInfo.skip(cs) && t_IhrPendingInfo.skip(cs);
return t_OutMsgQueue.skip(cs) && t_ProcessedInfo.skip(cs) && Maybe<gen::OutMsgQueueExtra>().skip(cs);
}

bool OutMsgQueueInfo::validate_skip(int* ops, vm::CellSlice& cs, bool weak) const {
return t_OutMsgQueue.validate_skip(ops, cs, weak) && t_ProcessedInfo.validate_skip(ops, cs, weak) &&
t_IhrPendingInfo.validate_skip(ops, cs, weak);
Maybe<gen::OutMsgQueueExtra>().validate_skip(ops, cs, weak);
}

const OutMsgQueueInfo t_OutMsgQueueInfo;
@ -28,6 +28,7 @@
|
|||
#include "td/utils/bits.h"
|
||||
#include "td/utils/StringBuilder.h"
|
||||
#include "ton/ton-types.h"
|
||||
#include "block-auto.h"
|
||||
|
||||
namespace block {
|
||||
|
||||
|
@ -469,11 +470,17 @@ struct MsgEnvelope final : TLB_Complex {
|
|||
int cur_addr, next_addr;
|
||||
td::RefInt256 fwd_fee_remaining;
|
||||
Ref<vm::Cell> msg;
|
||||
td::optional<ton::LogicalTime> emitted_lt;
|
||||
td::optional<MsgMetadata> metadata;
|
||||
};
|
||||
bool unpack(vm::CellSlice& cs, Record& data) const;
|
||||
bool unpack(vm::CellSlice& cs, Record_std& data) const;
|
||||
bool unpack_std(vm::CellSlice& cs, int& cur_a, int& nhop_a, Ref<vm::Cell>& msg) const;
|
||||
bool get_created_lt(const vm::CellSlice& cs, unsigned long long& created_lt) const;
|
||||
bool pack(vm::CellBuilder& cb, const Record_std& data) const;
|
||||
bool pack_cell(td::Ref<vm::Cell>& cell, const Record_std& data) const;
|
||||
bool get_emitted_lt(const vm::CellSlice& cs, unsigned long long& emitted_lt) const;
|
||||
int get_tag(const vm::CellSlice& cs) const override {
|
||||
return (int)cs.prefetch_ulong(4);
|
||||
}
|
||||
};
|
||||
|
||||
extern const MsgEnvelope t_MsgEnvelope;
|
||||
|
@ -801,12 +808,18 @@ struct InMsg final : TLB_Complex {
|
|||
msg_import_fin = 4,
|
||||
msg_import_tr = 5,
|
||||
msg_discard_fin = 6,
|
||||
msg_discard_tr = 7
|
||||
msg_discard_tr = 7,
|
||||
msg_import_deferred_fin = 8,
|
||||
msg_import_deferred_tr = 9
|
||||
};
|
||||
bool skip(vm::CellSlice& cs) const override;
|
||||
bool validate_skip(int* ops, vm::CellSlice& cs, bool weak = false) const override;
|
||||
int get_tag(const vm::CellSlice& cs) const override {
|
||||
return (int)cs.prefetch_ulong(3);
|
||||
int tag = (int)cs.prefetch_ulong(3);
|
||||
if (tag != 1) {
|
||||
return tag;
|
||||
}
|
||||
return (int)cs.prefetch_ulong(5) - 0b00100 + 8;
|
||||
}
|
||||
bool get_import_fees(vm::CellBuilder& cb, vm::CellSlice& cs) const;
|
||||
};
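get_tag above widens to five bits whenever the leading three bits are 001 (a prefix that was previously unused by InMsg), and remaps the 5-bit values 0b00100 / 0b00101 onto the new enum slots 8 and 9. A standalone sketch of that arithmetic with a toy bit reader in place of CellSlice (in_msg_tag is an illustrative name):

#include <cassert>
#include <cstdint>

// Sketch of the new InMsg tag mapping: 0b00100 -> msg_import_deferred_fin (8),
// 0b00101 -> msg_import_deferred_tr (9); all other constructors keep their
// 3-bit tag value.
static int in_msg_tag(std::uint32_t first5bits) {
  int tag3 = static_cast<int>(first5bits >> 2);  // leading three bits
  if (tag3 != 1) {
    return tag3;
  }
  return static_cast<int>(first5bits) - 0b00100 + 8;
}

int main() {
  assert(in_msg_tag(0b00100) == 8);   // msg_import_deferred_fin
  assert(in_msg_tag(0b00101) == 9);   // msg_import_deferred_tr
  assert(in_msg_tag(0b10000) == 4);   // msg_import_fin is still tag 4
}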
|
||||
|
@ -822,16 +835,24 @@ struct OutMsg final : TLB_Complex {
|
|||
msg_export_deq_imm = 4,
|
||||
msg_export_deq = 12,
|
||||
msg_export_deq_short = 13,
|
||||
msg_export_tr_req = 7
|
||||
msg_export_tr_req = 7,
|
||||
msg_export_new_defer = 20, // 0b10100
|
||||
msg_export_deferred_tr = 21 // 0b10101
|
||||
};
|
||||
bool skip(vm::CellSlice& cs) const override;
|
||||
bool validate_skip(int* ops, vm::CellSlice& cs, bool weak = false) const override;
|
||||
int get_tag(const vm::CellSlice& cs) const override {
|
||||
int t = (int)cs.prefetch_ulong(3);
|
||||
return t != 6 ? t : (int)cs.prefetch_ulong(4);
|
||||
if (t == 6) {
|
||||
return (int)cs.prefetch_ulong(4);
|
||||
}
|
||||
if (t == 5) {
|
||||
return (int)cs.prefetch_ulong(5);
|
||||
}
|
||||
return t;
|
||||
}
|
||||
bool get_export_value(vm::CellBuilder& cb, vm::CellSlice& cs) const;
|
||||
bool get_created_lt(vm::CellSlice& cs, unsigned long long& created_lt) const;
|
||||
bool get_emitted_lt(vm::CellSlice& cs, unsigned long long& emitted_lt) const;
|
||||
};
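OutMsg::get_tag above now resolves three tag widths: three bits by default, four bits when the prefix is 110 (the dequeue constructors 12 and 13), and five bits when the prefix is 101 (the new deferred constructors 20 and 21). A standalone sketch over the first five bits, with out_msg_tag as an illustrative stand-in for the CellSlice-based code:

#include <cassert>
#include <cstdint>

// Sketch of OutMsg::get_tag: 3-bit tags, except prefix 110 (4-bit deq tags)
// and prefix 101 (5-bit deferred tags, matching msg_export_new_defer$10100
// and msg_export_deferred_tr$10101 in block.tlb).
static int out_msg_tag(std::uint32_t first5bits) {
  int t = static_cast<int>(first5bits >> 2);     // leading three bits
  if (t == 6) {
    return static_cast<int>(first5bits >> 1);    // 4-bit deq / deq_short tags
  }
  if (t == 5) {
    return static_cast<int>(first5bits);         // 5-bit deferred tags
  }
  return t;
}

int main() {
  assert(out_msg_tag(0b11000) == 12);  // msg_export_deq
  assert(out_msg_tag(0b11010) == 13);  // msg_export_deq_short
  assert(out_msg_tag(0b10100) == 20);  // msg_export_new_defer
  assert(out_msg_tag(0b10101) == 21);  // msg_export_deferred_tr
  assert(out_msg_tag(0b11100) == 7);   // msg_export_tr_req
}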
|
||||
|
||||
extern const OutMsg t_OutMsg;
|
||||
|
@ -909,6 +930,16 @@ struct Aug_OutMsgQueue final : AugmentationCheckData {
|
|||
|
||||
extern const Aug_OutMsgQueue aug_OutMsgQueue;
|
||||
|
||||
struct Aug_DispatchQueue final : AugmentationCheckData {
|
||||
Aug_DispatchQueue() : AugmentationCheckData(gen::t_AccountDispatchQueue, t_uint64) {
|
||||
}
|
||||
bool eval_fork(vm::CellBuilder& cb, vm::CellSlice& left_cs, vm::CellSlice& right_cs) const override;
|
||||
bool eval_empty(vm::CellBuilder& cb) const override;
|
||||
bool eval_leaf(vm::CellBuilder& cb, vm::CellSlice& cs) const override;
|
||||
};
|
||||
|
||||
extern const Aug_DispatchQueue aug_DispatchQueue;
|
||||
|
||||
struct OutMsgQueue final : TLB_Complex {
|
||||
HashmapAugE dict_type;
|
||||
OutMsgQueue() : dict_type(32 + 64 + 256, aug_OutMsgQueue){};
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include "td/utils/tl_storers.h"
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/Random.h"
|
||||
#include "vm/fmt.hpp"
|
||||
|
||||
namespace block {
|
||||
using namespace std::literals::string_literals;
|
||||
|
@ -642,7 +643,11 @@ bool EnqueuedMsgDescr::unpack(vm::CellSlice& cs) {
|
|||
}
|
||||
cur_prefix_ = interpolate_addr(src_prefix_, dest_prefix_, env.cur_addr);
|
||||
next_prefix_ = interpolate_addr(src_prefix_, dest_prefix_, env.next_addr);
|
||||
lt_ = info.created_lt;
|
||||
unsigned long long lt;
|
||||
if (!tlb::t_MsgEnvelope.get_emitted_lt(vm::load_cell_slice(enq.out_msg), lt)) {
|
||||
return invalidate();
|
||||
}
|
||||
lt_ = lt;
|
||||
enqueued_lt_ = enq.enqueued_lt;
|
||||
hash_ = env.msg->get_hash().bits();
|
||||
msg_ = std::move(env.msg);
|
||||
|
@ -858,12 +863,20 @@ td::Status ShardState::unpack_out_msg_queue_info(Ref<vm::Cell> out_msg_queue_inf
|
|||
return td::Status::Error(
|
||||
-666, "ProcessedInfo in the state of "s + id_.to_str() + " is invalid according to automated validity checks");
|
||||
}
|
||||
if (!block::gen::t_IhrPendingInfo.validate_csr(1024, qinfo.ihr_pending)) {
|
||||
return td::Status::Error(
|
||||
-666, "IhrPendingInfo in the state of "s + id_.to_str() + " is invalid according to automated validity checks");
|
||||
}
|
||||
processed_upto_ = block::MsgProcessedUptoCollection::unpack(ton::ShardIdFull(id_), std::move(qinfo.proc_info));
|
||||
ihr_pending_ = std::make_unique<vm::Dictionary>(std::move(qinfo.ihr_pending), 320);
|
||||
ihr_pending_ = std::make_unique<vm::Dictionary>(320);
|
||||
if (qinfo.extra.write().fetch_long(1)) {
|
||||
block::gen::OutMsgQueueExtra::Record extra;
|
||||
if (!block::tlb::csr_unpack(qinfo.extra, extra)) {
|
||||
return td::Status::Error(-666, "cannot unpack OutMsgQueueExtre in the state of "s + id_.to_str());
|
||||
}
|
||||
dispatch_queue_ = std::make_unique<vm::AugmentedDictionary>(extra.dispatch_queue, 256, tlb::aug_DispatchQueue);
|
||||
if (extra.out_queue_size.write().fetch_long(1)) {
|
||||
out_msg_queue_size_ = extra.out_queue_size->prefetch_ulong(48);
|
||||
}
|
||||
} else {
|
||||
dispatch_queue_ = std::make_unique<vm::AugmentedDictionary>(256, tlb::aug_DispatchQueue);
|
||||
}
|
||||
auto shard1 = id_.shard_full();
|
||||
td::BitArray<64> pfx{(long long)shard1.shard};
|
||||
int pfx_len = shard_prefix_length(shard1);
|
||||
|
@ -994,6 +1007,17 @@ td::Status ShardState::merge_with(ShardState& sib) {
|
|||
underload_history_ = overload_history_ = 0;
|
||||
// 10. compute vert_seqno
|
||||
vert_seqno_ = std::max(vert_seqno_, sib.vert_seqno_);
|
||||
// 11. merge dispatch_queue (same as account dict)
|
||||
if (!dispatch_queue_->combine_with(*sib.dispatch_queue_)) {
|
||||
return td::Status::Error(-666, "cannot merge dispatch queues of the two ancestors");
|
||||
}
|
||||
sib.dispatch_queue_.reset();
|
||||
// 11. merge out_msg_queue_size
|
||||
if (out_msg_queue_size_ && sib.out_msg_queue_size_) {
|
||||
out_msg_queue_size_.value() += sib.out_msg_queue_size_.value();
|
||||
} else {
|
||||
out_msg_queue_size_ = {};
|
||||
}
|
||||
// Anything else? add here
|
||||
// ...
|
||||
|
||||
|
@ -1009,8 +1033,8 @@ td::Status ShardState::merge_with(ShardState& sib) {
|
|||
return td::Status::OK();
|
||||
}
|
||||
|
||||
td::Result<std::unique_ptr<vm::AugmentedDictionary>> ShardState::compute_split_out_msg_queue(ton::ShardIdFull subshard,
|
||||
td::uint32* queue_size) {
|
||||
td::Result<std::unique_ptr<vm::AugmentedDictionary>> ShardState::compute_split_out_msg_queue(
|
||||
ton::ShardIdFull subshard) {
|
||||
auto shard = id_.shard_full();
|
||||
if (!ton::shard_is_parent(shard, subshard)) {
|
||||
return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() +
|
||||
|
@ -1018,7 +1042,7 @@ td::Result<std::unique_ptr<vm::AugmentedDictionary>> ShardState::compute_split_o
|
|||
}
|
||||
CHECK(out_msg_queue_);
|
||||
auto subqueue = std::make_unique<vm::AugmentedDictionary>(*out_msg_queue_);
|
||||
int res = block::filter_out_msg_queue(*subqueue, shard, subshard, queue_size);
|
||||
int res = block::filter_out_msg_queue(*subqueue, shard, subshard);
|
||||
if (res < 0) {
|
||||
return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str());
|
||||
}
|
||||
|
@ -1040,7 +1064,7 @@ td::Result<std::shared_ptr<block::MsgProcessedUptoCollection>> ShardState::compu
|
|||
return std::move(sub_processed_upto);
|
||||
}
|
||||
|
||||
td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) {
|
||||
td::Status ShardState::split(ton::ShardIdFull subshard) {
|
||||
if (!ton::shard_is_parent(id_.shard_full(), subshard)) {
|
||||
return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() +
|
||||
" because it is not a parent");
|
||||
|
@ -1058,10 +1082,12 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size)
|
|||
auto shard1 = id_.shard_full();
|
||||
CHECK(ton::shard_is_parent(shard1, subshard));
|
||||
CHECK(out_msg_queue_);
|
||||
int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, queue_size);
|
||||
td::uint64 queue_size;
|
||||
int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, &queue_size);
|
||||
if (res1 < 0) {
|
||||
return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str());
|
||||
}
|
||||
out_msg_queue_size_ = queue_size;
|
||||
LOG(DEBUG) << "split counters: " << res1;
|
||||
// 3. processed_upto
|
||||
LOG(DEBUG) << "splitting ProcessedUpto";
|
||||
|
@ -1091,6 +1117,11 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size)
|
|||
// NB: if total_fees_extra will be allowed to be non-empty, split it here too
|
||||
// 7. reset overload/underload history
|
||||
overload_history_ = underload_history_ = 0;
|
||||
// 8. split dispatch_queue (same as account dict)
|
||||
LOG(DEBUG) << "splitting dispatch_queue";
|
||||
CHECK(dispatch_queue_);
|
||||
CHECK(dispatch_queue_->cut_prefix_subdict(pfx.bits(), pfx_len));
|
||||
CHECK(dispatch_queue_->has_common_prefix(pfx.bits(), pfx_len));
|
||||
// 999. anything else?
|
||||
id_.id.shard = subshard.shard;
|
||||
id_.file_hash.set_zero();
|
||||
|
@ -1099,7 +1130,7 @@ td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size)
|
|||
}
|
||||
|
||||
int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard,
|
||||
td::uint32* queue_size) {
|
||||
td::uint64* queue_size) {
|
||||
if (queue_size) {
|
||||
*queue_size = 0;
|
||||
}
|
||||
|
@ -1390,7 +1421,7 @@ bool ValueFlow::store(vm::CellBuilder& cb) const {
|
|||
&& exported.store(cb2) // exported:CurrencyCollection
|
||||
&& cb.store_ref_bool(cb2.finalize()) // ]
|
||||
&& fees_collected.store(cb) // fees_collected:CurrencyCollection
|
||||
&& (burned.is_zero() || burned.store(cb)) // fees_burned:CurrencyCollection
|
||||
&& (burned.is_zero() || burned.store(cb)) // fees_burned:CurrencyCollection
|
||||
&& fees_imported.store(cb2) // ^[ fees_imported:CurrencyCollection
|
||||
&& recovered.store(cb2) // recovered:CurrencyCollection
|
||||
&& created.store(cb2) // created:CurrencyCollection
|
||||
|
@ -1419,8 +1450,7 @@ bool ValueFlow::fetch(vm::CellSlice& cs) {
|
|||
from_prev_blk.validate_unpack(std::move(f2.r1.from_prev_blk)) &&
|
||||
to_next_blk.validate_unpack(std::move(f2.r1.to_next_blk)) &&
|
||||
imported.validate_unpack(std::move(f2.r1.imported)) && exported.validate_unpack(std::move(f2.r1.exported)) &&
|
||||
fees_collected.validate_unpack(std::move(f2.fees_collected)) &&
|
||||
burned.validate_unpack(std::move(f2.burned)) &&
|
||||
fees_collected.validate_unpack(std::move(f2.fees_collected)) && burned.validate_unpack(std::move(f2.burned)) &&
|
||||
fees_imported.validate_unpack(std::move(f2.r2.fees_imported)) &&
|
||||
recovered.validate_unpack(std::move(f2.r2.recovered)) && created.validate_unpack(std::move(f2.r2.created)) &&
|
||||
minted.validate_unpack(std::move(f2.r2.minted))) {
|
||||
|
@ -2305,4 +2335,132 @@ bool parse_block_id_ext(td::Slice str, ton::BlockIdExt& blkid) {
|
|||
return parse_block_id_ext(str.begin(), str.end(), blkid);
|
||||
}
|
||||
|
||||
bool unpack_account_dispatch_queue(Ref<vm::CellSlice> csr, vm::Dictionary& dict, td::uint64& dict_size) {
|
||||
if (csr.not_null()) {
|
||||
block::gen::AccountDispatchQueue::Record rec;
|
||||
if (!block::tlb::csr_unpack(std::move(csr), rec)) {
|
||||
return false;
|
||||
}
|
||||
dict = vm::Dictionary{rec.messages, 64};
|
||||
dict_size = rec.count;
|
||||
if (dict_size == 0 || dict.is_empty()) {
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
dict = vm::Dictionary{64};
|
||||
dict_size = 0;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
Ref<vm::CellSlice> pack_account_dispatch_queue(const vm::Dictionary& dict, td::uint64 dict_size) {
|
||||
if (dict_size == 0) {
|
||||
return {};
|
||||
}
|
||||
// _ messages:(HashmapE 64 EnqueuedMsg) count:uint48 = AccountDispatchQueue;
|
||||
vm::CellBuilder cb;
|
||||
CHECK(dict.append_dict_to_bool(cb));
|
||||
cb.store_long(dict_size, 48);
|
||||
return cb.as_cellslice_ref();
|
||||
}
|
||||
|
||||
Ref<vm::CellSlice> get_dispatch_queue_min_lt_account(const vm::AugmentedDictionary& dispatch_queue,
|
||||
ton::StdSmcAddress& addr) {
|
||||
// TODO: This can be done more effectively
|
||||
vm::AugmentedDictionary queue{dispatch_queue.get_root(), 256, tlb::aug_DispatchQueue};
|
||||
if (queue.is_empty()) {
|
||||
return {};
|
||||
}
|
||||
auto root_extra = queue.get_root_extra();
|
||||
if (root_extra.is_null()) {
|
||||
return {};
|
||||
}
|
||||
ton::LogicalTime min_lt = root_extra->prefetch_long(64);
|
||||
while (true) {
|
||||
td::Bits256 key;
|
||||
int pfx_len = queue.get_common_prefix(key.bits(), 256);
|
||||
if (pfx_len < 0) {
|
||||
return {};
|
||||
}
|
||||
if (pfx_len == 256) {
|
||||
addr = key;
|
||||
return queue.lookup(key);
|
||||
}
|
||||
key[pfx_len] = false;
|
||||
vm::AugmentedDictionary queue_cut{queue.get_root(), 256, tlb::aug_DispatchQueue};
|
||||
if (!queue_cut.cut_prefix_subdict(key.bits(), pfx_len + 1)) {
|
||||
return {};
|
||||
}
|
||||
root_extra = queue_cut.get_root_extra();
|
||||
if (root_extra.is_null()) {
|
||||
return {};
|
||||
}
|
||||
ton::LogicalTime cut_min_lt = root_extra->prefetch_long(64);
|
||||
if (cut_min_lt != min_lt) {
|
||||
key[pfx_len] = true;
|
||||
}
|
||||
if (!queue.cut_prefix_subdict(key.bits(), pfx_len + 1)) {
|
||||
return {};
|
||||
}
|
||||
}
|
||||
}
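Together with unpack_account_dispatch_queue, the helper above lets a caller walk from the shard-wide DispatchQueue down to the earliest deferred message of the selected account. A hedged usage sketch; the include paths and the peek_earliest_deferred wrapper are assumptions, only the block:: helpers and the dictionary calls already used above come from the surrounding code:

#include "block/block.h"  // assumed include path for the block:: helpers
#include "vm/dict.h"

bool peek_earliest_deferred(const vm::AugmentedDictionary& dispatch_queue,
                            ton::StdSmcAddress& addr, ton::LogicalTime& lt) {
  // Account with the minimal aggregated LT in the dispatch queue.
  auto acc_queue = block::get_dispatch_queue_min_lt_account(dispatch_queue, addr);
  if (acc_queue.is_null()) {
    return false;  // dispatch queue is empty
  }
  // AccountDispatchQueue = messages:(HashmapE 64 EnqueuedMsg) count:uint48.
  vm::Dictionary messages{64};
  td::uint64 count;
  if (!block::unpack_account_dispatch_queue(std::move(acc_queue), messages, count)) {
    return false;
  }
  // The per-account dictionary is keyed by created_lt, so its minimal key is
  // the logical time of the earliest queued message.
  td::BitArray<64> key;
  if (messages.get_minmax_key(key.bits(), 64).is_null()) {
    return false;
  }
  lt = key.to_ulong();
  return true;
}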
|
||||
|
||||
bool remove_dispatch_queue_entry(vm::AugmentedDictionary& dispatch_queue, const ton::StdSmcAddress& addr,
|
||||
ton::LogicalTime lt) {
|
||||
auto account_dispatch_queue = dispatch_queue.lookup(addr);
|
||||
if (account_dispatch_queue.is_null()) {
|
||||
return false;
|
||||
}
|
||||
vm::Dictionary dict{64};
|
||||
td::uint64 dict_size;
|
||||
if (!unpack_account_dispatch_queue(std::move(account_dispatch_queue), dict, dict_size)) {
|
||||
return false;
|
||||
}
|
||||
td::BitArray<64> key;
|
||||
key.store_ulong(lt);
|
||||
auto entry = dict.lookup_delete(key);
|
||||
if (entry.is_null()) {
|
||||
return false;
|
||||
}
|
||||
--dict_size;
|
||||
account_dispatch_queue = pack_account_dispatch_queue(dict, dict_size);
|
||||
if (account_dispatch_queue.not_null()) {
|
||||
dispatch_queue.set(addr, account_dispatch_queue);
|
||||
} else {
|
||||
dispatch_queue.lookup_delete(addr);
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool MsgMetadata::unpack(vm::CellSlice& cs) {
|
||||
// msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata;
|
||||
int tag;
|
||||
return cs.fetch_int_to(4, tag) && tag == 0 && cs.fetch_uint_to(32, depth) &&
|
||||
cs.prefetch_ulong(3) == 0b100 && // std address, no anycast
|
||||
tlb::t_MsgAddressInt.extract_std_address(cs, initiator_wc, initiator_addr) &&
|
||||
cs.fetch_uint_to(64, initiator_lt);
|
||||
}
|
||||
|
||||
bool MsgMetadata::pack(vm::CellBuilder& cb) const {
|
||||
// msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata;
|
||||
return cb.store_long_bool(0, 4) && cb.store_long_bool(depth, 32) &&
|
||||
tlb::t_MsgAddressInt.store_std_address(cb, initiator_wc, initiator_addr) &&
|
||||
cb.store_long_bool(initiator_lt, 64);
|
||||
}
|
||||
|
||||
std::string MsgMetadata::to_str() const {
|
||||
return PSTRING() << "[ depth=" << depth << " init=" << initiator_wc << ":" << initiator_addr.to_hex() << ":"
|
||||
<< initiator_lt << " ]";
|
||||
}
|
||||
|
||||
bool MsgMetadata::operator==(const MsgMetadata& other) const {
|
||||
return depth == other.depth && initiator_wc == other.initiator_wc && initiator_addr == other.initiator_addr &&
|
||||
initiator_lt == other.initiator_lt;
|
||||
}
|
||||
|
||||
bool MsgMetadata::operator!=(const MsgMetadata& other) const {
|
||||
return !(*this == other);
|
||||
}
|
||||
|
||||
|
||||
} // namespace block
|
||||
|
|
|
@ -417,6 +417,8 @@ struct ShardState {
|
|||
std::unique_ptr<vm::Dictionary> ihr_pending_;
|
||||
std::unique_ptr<vm::Dictionary> block_create_stats_;
|
||||
std::shared_ptr<block::MsgProcessedUptoCollection> processed_upto_;
|
||||
std::unique_ptr<vm::AugmentedDictionary> dispatch_queue_;
|
||||
td::optional<td::uint64> out_msg_queue_size_;
|
||||
|
||||
bool is_valid() const {
|
||||
return id_.is_valid();
|
||||
|
@ -433,11 +435,10 @@ struct ShardState {
|
|||
ton::BlockSeqno prev_mc_block_seqno, bool after_split, bool clear_history,
|
||||
std::function<bool(ton::BlockSeqno)> for_each_mcseqno);
|
||||
td::Status merge_with(ShardState& sib);
|
||||
td::Result<std::unique_ptr<vm::AugmentedDictionary>> compute_split_out_msg_queue(ton::ShardIdFull subshard,
|
||||
td::uint32* queue_size = nullptr);
|
||||
td::Result<std::unique_ptr<vm::AugmentedDictionary>> compute_split_out_msg_queue(ton::ShardIdFull subshard);
|
||||
td::Result<std::shared_ptr<block::MsgProcessedUptoCollection>> compute_split_processed_upto(
|
||||
ton::ShardIdFull subshard);
|
||||
td::Status split(ton::ShardIdFull subshard, td::uint32* queue_size = nullptr);
|
||||
td::Status split(ton::ShardIdFull subshard);
|
||||
td::Status unpack_out_msg_queue_info(Ref<vm::Cell> out_msg_queue_info);
|
||||
bool clear_load_history() {
|
||||
overload_history_ = underload_history_ = 0;
|
||||
|
@ -658,7 +659,7 @@ class MtCarloComputeShare {
|
|||
};
|
||||
|
||||
int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard,
|
||||
td::uint32* queue_size = nullptr);
|
||||
td::uint64* queue_size = nullptr);
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, const ShardId& shard_id);
|
||||
|
||||
|
@ -749,4 +750,25 @@ bool parse_hex_hash(td::Slice str, td::Bits256& hash);
|
|||
bool parse_block_id_ext(const char* str, const char* end, ton::BlockIdExt& blkid);
|
||||
bool parse_block_id_ext(td::Slice str, ton::BlockIdExt& blkid);
|
||||
|
||||
bool unpack_account_dispatch_queue(Ref<vm::CellSlice> csr, vm::Dictionary& dict, td::uint64& dict_size);
|
||||
Ref<vm::CellSlice> pack_account_dispatch_queue(const vm::Dictionary& dict, td::uint64 dict_size);
|
||||
Ref<vm::CellSlice> get_dispatch_queue_min_lt_account(const vm::AugmentedDictionary& dispatch_queue,
|
||||
ton::StdSmcAddress& addr);
|
||||
bool remove_dispatch_queue_entry(vm::AugmentedDictionary& dispatch_queue, const ton::StdSmcAddress& addr,
|
||||
ton::LogicalTime lt);
|
||||
|
||||
struct MsgMetadata {
|
||||
td::uint32 depth;
|
||||
ton::WorkchainId initiator_wc;
|
||||
ton::StdSmcAddress initiator_addr;
|
||||
ton::LogicalTime initiator_lt;
|
||||
|
||||
bool unpack(vm::CellSlice& cs);
|
||||
bool pack(vm::CellBuilder& cb) const;
|
||||
std::string to_str() const;
|
||||
|
||||
bool operator==(const MsgMetadata& other) const;
|
||||
bool operator!=(const MsgMetadata& other) const;
|
||||
};
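A hedged round-trip sketch for the record above: fill the fields, pack them in the msg_metadata#0 layout, and unpack from the resulting cell (the include paths and the sample field values are assumptions):

#include "block/block.h"  // assumed include path for block::MsgMetadata
#include "vm/cells.h"     // assumed include path for CellBuilder / load_cell_slice

bool msg_metadata_roundtrip() {
  block::MsgMetadata meta;
  meta.depth = 1;                  // one dispatch hop below the initiating message
  meta.initiator_wc = 0;           // sample initiator: a basechain account
  meta.initiator_addr.set_zero();  // placeholder StdSmcAddress
  meta.initiator_lt = 123456789;

  vm::CellBuilder cb;
  if (!meta.pack(cb)) {
    return false;
  }
  vm::CellSlice cs = vm::load_cell_slice(cb.finalize());
  block::MsgMetadata meta2;
  return meta2.unpack(cs) && meta2 == meta;
}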
|
||||
|
||||
} // namespace block
|
||||
|
|
|
@ -172,6 +172,12 @@ interm_addr_ext$11 workchain_id:int32 addr_pfx:uint64
|
|||
msg_envelope#4 cur_addr:IntermediateAddress
|
||||
next_addr:IntermediateAddress fwd_fee_remaining:Grams
|
||||
msg:^(Message Any) = MsgEnvelope;
|
||||
msg_metadata#0 depth:uint32 initiator_addr:MsgAddressInt initiator_lt:uint64 = MsgMetadata;
|
||||
msg_envelope_v2#5 cur_addr:IntermediateAddress
|
||||
next_addr:IntermediateAddress fwd_fee_remaining:Grams
|
||||
msg:^(Message Any)
|
||||
emitted_lt:(Maybe uint64)
|
||||
metadata:(Maybe MsgMetadata) = MsgEnvelope;
|
||||
//
|
||||
msg_import_ext$000 msg:^(Message Any) transaction:^Transaction
|
||||
= InMsg;
|
||||
|
@ -187,6 +193,9 @@ msg_discard_fin$110 in_msg:^MsgEnvelope transaction_id:uint64
|
|||
fwd_fee:Grams = InMsg;
|
||||
msg_discard_tr$111 in_msg:^MsgEnvelope transaction_id:uint64
|
||||
fwd_fee:Grams proof_delivered:^Cell = InMsg;
|
||||
msg_import_deferred_fin$00100 in_msg:^MsgEnvelope
|
||||
transaction:^Transaction fwd_fee:Grams = InMsg;
|
||||
msg_import_deferred_tr$00101 in_msg:^MsgEnvelope out_msg:^MsgEnvelope = InMsg;
|
||||
//
|
||||
import_fees$_ fees_collected:Grams
|
||||
value_imported:CurrencyCollection = ImportFees;
|
||||
|
@ -210,6 +219,10 @@ msg_export_tr_req$111 out_msg:^MsgEnvelope
|
|||
imported:^InMsg = OutMsg;
|
||||
msg_export_deq_imm$100 out_msg:^MsgEnvelope
|
||||
reimport:^InMsg = OutMsg;
|
||||
msg_export_new_defer$10100 out_msg:^MsgEnvelope
|
||||
transaction:^Transaction = OutMsg;
|
||||
msg_export_deferred_tr$10101 out_msg:^MsgEnvelope
|
||||
imported:^InMsg = OutMsg;
|
||||
|
||||
_ enqueued_lt:uint64 out_msg:^MsgEnvelope = EnqueuedMsg;
|
||||
|
||||
|
@@ -224,8 +237,15 @@ _ (HashmapE 96 ProcessedUpto) = ProcessedInfo;
ihr_pending$_ import_lt:uint64 = IhrPendingSince;
_ (HashmapE 320 IhrPendingSince) = IhrPendingInfo;

// key - created_lt
_ messages:(HashmapE 64 EnqueuedMsg) count:uint48 = AccountDispatchQueue;
// key - sender address, aug - min created_lt
_ (HashmapAugE 256 AccountDispatchQueue uint64) = DispatchQueue;

out_msg_queue_extra#0 dispatch_queue:DispatchQueue out_queue_size:(Maybe uint48) = OutMsgQueueExtra;

_ out_queue:OutMsgQueue proc_info:ProcessedInfo
  ihr_pending:IhrPendingInfo = OutMsgQueueInfo;
  extra:(Maybe OutMsgQueueExtra) = OutMsgQueueInfo;
//
storage_used$_ cells:(VarUInteger 7) bits:(VarUInteger 7)
  public_cells:(VarUInteger 7) = StorageUsed;
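A hedged sketch of consuming the extra field added to OutMsgQueueInfo above: test the Maybe bit, unpack OutMsgQueueExtra through the generated record, then read the optional 48-bit queue-size hint (the read_out_queue_size wrapper and the include paths are assumptions; the generated type names follow block-auto.h as used elsewhere in this change):

#include "block/block-auto.h"   // assumed include paths
#include "block/block-parse.h"

bool read_out_queue_size(td::Ref<vm::CellSlice> extra_csr, td::uint64& size, bool& known) {
  known = false;
  // extra:(Maybe OutMsgQueueExtra) -- a leading 0 bit means the field is absent.
  if (extra_csr.is_null() || !extra_csr.write().fetch_long(1)) {
    return true;
  }
  block::gen::OutMsgQueueExtra::Record extra;
  if (!block::tlb::csr_unpack(std::move(extra_csr), extra)) {
    return false;
  }
  // out_queue_size:(Maybe uint48) -- another Maybe bit, then the 48-bit counter.
  if (extra.out_queue_size.write().fetch_long(1)) {
    size = extra.out_queue_size->prefetch_ulong(48);
    known = true;
  }
  return true;
}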
|
||||
|
@ -781,7 +801,7 @@ size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells
|
|||
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig;
|
||||
size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16
|
||||
max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32
|
||||
max_acc_public_libraries:uint32 = SizeLimitsConfig;
|
||||
max_acc_public_libraries:uint32 defer_out_queue_size_limit:uint32 = SizeLimitsConfig;
|
||||
_ SizeLimitsConfig = ConfigParam 43;
|
||||
|
||||
// key is [ wc:int32 addr:uint256 ]
|
||||
|
|
|
@ -1956,6 +1956,7 @@ td::Result<SizeLimitsConfig> Config::do_get_size_limits_config(td::Ref<vm::CellS
|
|||
limits.max_acc_state_bits = rec.max_acc_state_bits;
|
||||
limits.max_acc_state_cells = rec.max_acc_state_cells;
|
||||
limits.max_acc_public_libraries = rec.max_acc_public_libraries;
|
||||
limits.defer_out_queue_size_limit = rec.defer_out_queue_size_limit;
|
||||
};
|
||||
gen::SizeLimitsConfig::Record_size_limits_config rec_v1;
|
||||
gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2;
|
||||
|
|
|
@ -395,6 +395,7 @@ struct SizeLimitsConfig {
|
|||
td::uint32 max_acc_state_cells = 1 << 16;
|
||||
td::uint32 max_acc_state_bits = (1 << 16) * 1023;
|
||||
td::uint32 max_acc_public_libraries = 256;
|
||||
td::uint32 defer_out_queue_size_limit = 256;
|
||||
};
|
||||
|
||||
struct CatchainValidatorsConfig {
|
||||
|
|
|
@ -1285,7 +1285,11 @@ bool Transaction::prepare_rand_seed(td::BitArray<256>& rand_seed, const ComputeP
|
|||
// if the smart contract wants to randomize further, it can use RANDOMIZE instruction
|
||||
td::BitArray<256 + 256> data;
|
||||
data.bits().copy_from(cfg.block_rand_seed.cbits(), 256);
|
||||
(data.bits() + 256).copy_from(account.addr_rewrite.cbits(), 256);
|
||||
if (cfg.global_version >= 8) {
|
||||
(data.bits() + 256).copy_from(account.addr.cbits(), 256);
|
||||
} else {
|
||||
(data.bits() + 256).copy_from(account.addr_rewrite.cbits(), 256);
|
||||
}
|
||||
rand_seed.clear();
|
||||
data.compute_sha256(rand_seed);
|
||||
return true;
|
||||
|
@ -1600,12 +1604,22 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) {
|
|||
cp.skip_reason = in_msg_state.not_null() ? ComputePhase::sk_bad_state : ComputePhase::sk_no_state;
|
||||
return true;
|
||||
} else if (in_msg_state.not_null()) {
|
||||
if (cfg.allow_external_unfreeze) {
|
||||
if (in_msg_extern && account.addr != in_msg_state->get_hash().bits()) {
|
||||
// only for external messages with non-zero initstate in active accounts
|
||||
LOG(DEBUG) << "in_msg_state hash mismatch in external message";
|
||||
cp.skip_reason = ComputePhase::sk_bad_state;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
unpack_msg_state(cfg, true); // use only libraries
|
||||
}
|
||||
if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) {
|
||||
LOG(DEBUG) << "in_msg_state hash mismatch in external message";
|
||||
cp.skip_reason = ComputePhase::sk_bad_state;
|
||||
return true;
|
||||
if (!cfg.allow_external_unfreeze) {
|
||||
if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) {
|
||||
LOG(DEBUG) << "in_msg_state hash mismatch in external message";
|
||||
cp.skip_reason = ComputePhase::sk_bad_state;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
td::optional<PrecompiledContractsConfig::Contract> precompiled;
|
||||
|
@ -1647,7 +1661,12 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) {
|
|||
std::unique_ptr<StringLoggerTail> logger;
|
||||
auto vm_log = vm::VmLog();
|
||||
if (cfg.with_vm_log) {
|
||||
size_t log_max_size = cfg.vm_log_verbosity > 0 ? 1024 * 1024 : 256;
|
||||
size_t log_max_size = 256;
|
||||
if (cfg.vm_log_verbosity > 4) {
|
||||
log_max_size = 32 << 20;
|
||||
} else if (cfg.vm_log_verbosity > 0) {
|
||||
log_max_size = 1 << 20;
|
||||
}
|
||||
logger = std::make_unique<StringLoggerTail>(log_max_size);
|
||||
vm_log.log_interface = logger.get();
|
||||
vm_log.log_options = td::LogOptions(VERBOSITY_NAME(DEBUG), true, false);
|
||||
|
@ -1659,6 +1678,7 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) {
|
|||
vm_log.log_mask |= vm::VmLog::DumpStack;
|
||||
if (cfg.vm_log_verbosity > 4) {
|
||||
vm_log.log_mask |= vm::VmLog::DumpStackVerbose;
|
||||
vm_log.log_mask |= vm::VmLog::DumpC5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1826,6 +1846,26 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) {
|
|||
for (int i = n - 1; i >= 0; --i) {
|
||||
ap.result_arg = n - 1 - i;
|
||||
if (!block::gen::t_OutListNode.validate_ref(ap.action_list[i])) {
|
||||
if (cfg.message_skip_enabled) {
|
||||
// try to read mode from action_send_msg even if out_msg scheme is violated
|
||||
// action should at least contain 40 bits: 32bit tag and 8 bit mode
|
||||
// if (mode & 2), that is ignore error mode, skip action even for invalid message
|
||||
// if there is no (mode & 2) but (mode & 16) presents - enable bounce if possible
|
||||
bool special = true;
|
||||
auto cs = load_cell_slice_special(ap.action_list[i], special);
|
||||
if (!special) {
|
||||
if ((cs.size() >= 40) && ((int)cs.fetch_ulong(32) == 0x0ec3c86d)) {
|
||||
int mode = (int)cs.fetch_ulong(8);
|
||||
if (mode & 2) {
|
||||
ap.skipped_actions++;
|
||||
ap.action_list[i] = {};
|
||||
continue;
|
||||
} else if ((mode & 16) && cfg.bounce_on_fail_enabled) {
|
||||
ap.bounce = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
ap.result_code = 34; // action #i invalid or unsupported
|
||||
ap.action_list_invalid = true;
|
||||
LOG(DEBUG) << "invalid action " << ap.result_arg << " found while preprocessing action list: error code "
|
||||
|
@ -1835,6 +1875,9 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) {
|
|||
}
|
||||
ap.valid = true;
|
||||
for (int i = n - 1; i >= 0; --i) {
|
||||
if(ap.action_list[i].is_null()) {
|
||||
continue;
|
||||
}
|
||||
ap.result_arg = n - 1 - i;
|
||||
vm::CellSlice cs = load_cell_slice(ap.action_list[i]);
|
||||
CHECK(cs.fetch_ref().not_null());
|
||||
|
@ -1872,7 +1915,7 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) {
|
|||
ap.no_funds = true;
|
||||
}
|
||||
LOG(DEBUG) << "invalid action " << ap.result_arg << " in action list: error code " << ap.result_code;
|
||||
// This is reuqired here because changes to libraries are applied even if actipn phase fails
|
||||
// This is required here because changes to libraries are applied even if action phase fails
|
||||
enforce_state_limits();
|
||||
if (cfg.action_fine_enabled) {
|
||||
ap.action_fine = std::min(ap.action_fine, balance.grams);
|
||||
|
@ -2280,6 +2323,15 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
return -1;
|
||||
}
|
||||
bool skip_invalid = (act_rec.mode & 2);
|
||||
auto check_skip_invalid = [&](unsigned error_code) -> unsigned int {
|
||||
if (skip_invalid) {
|
||||
if (cfg.message_skip_enabled) {
|
||||
ap.skipped_actions++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
return error_code;
|
||||
};
|
||||
// try to parse suggested message in act_rec.out_msg
|
||||
td::RefInt256 fwd_fee, ihr_fee;
|
||||
block::gen::MessageRelaxed::Record msg;
|
||||
|
@ -2346,8 +2398,12 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
if (!tlb::csr_unpack(msg.info, info) || !block::tlb::t_CurrencyCollection.validate_csr(info.value)) {
|
||||
return -1;
|
||||
}
|
||||
fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee);
|
||||
ihr_fee = block::tlb::t_Grams.as_integer(info.ihr_fee);
|
||||
if (cfg.disable_custom_fess) {
|
||||
fwd_fee = ihr_fee = td::zero_refint();
|
||||
} else {
|
||||
fwd_fee = block::tlb::t_Grams.as_integer(info.fwd_fee);
|
||||
ihr_fee = block::tlb::t_Grams.as_integer(info.ihr_fee);
|
||||
}
|
||||
}
|
||||
// set created_at and created_lt to correct values
|
||||
info.created_at = now;
|
||||
|
@ -2363,7 +2419,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
bool to_mc = false;
|
||||
if (!check_rewrite_dest_addr(info.dest, cfg, &to_mc)) {
|
||||
LOG(DEBUG) << "invalid destination address in a proposed outbound message";
|
||||
return skip_invalid ? 0 : 36; // invalid destination address
|
||||
return check_skip_invalid(36); // invalid destination address
|
||||
}
|
||||
|
||||
// fetch message pricing info
|
||||
|
@ -2378,7 +2434,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
if (!ext_msg && !(act_rec.mode & 0x80) && !(act_rec.mode & 1)) {
|
||||
if (!block::tlb::t_CurrencyCollection.validate_csr(info.value)) {
|
||||
LOG(DEBUG) << "invalid value:CurrencyCollection in proposed outbound message";
|
||||
return skip_invalid ? 0 : 37;
|
||||
return check_skip_invalid(37);
|
||||
}
|
||||
block::CurrencyCollection value;
|
||||
CHECK(value.unpack(info.value));
|
||||
|
@ -2395,7 +2451,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
if (new_funds->sgn() < 0) {
|
||||
LOG(DEBUG)
|
||||
<< "not enough value to transfer with the message: all of the inbound message value has been consumed";
|
||||
return skip_invalid ? 0 : 37;
|
||||
return check_skip_invalid(37);
|
||||
}
|
||||
}
|
||||
funds = std::min(funds, new_funds);
|
||||
|
@ -2433,17 +2489,17 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
if (sstat.cells > max_cells && max_cells < cfg.size_limits.max_msg_cells) {
|
||||
LOG(DEBUG) << "not enough funds to process a message (max_cells=" << max_cells << ")";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 40;
|
||||
return check_skip_invalid(40);
|
||||
}
|
||||
if (sstat.bits > cfg.size_limits.max_msg_bits || sstat.cells > max_cells) {
|
||||
LOG(DEBUG) << "message too large, invalid";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 40;
|
||||
return check_skip_invalid(40);
|
||||
}
|
||||
if (max_merkle_depth > max_allowed_merkle_depth) {
|
||||
LOG(DEBUG) << "message has too big merkle depth, invalid";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 40;
|
||||
return check_skip_invalid(40);
|
||||
}
|
||||
LOG(DEBUG) << "storage paid for a message: " << sstat.cells << " cells, " << sstat.bits << " bits";
|
||||
|
||||
|
@ -2475,7 +2531,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
if (!block::tlb::t_CurrencyCollection.validate_csr(info.value)) {
|
||||
LOG(DEBUG) << "invalid value:CurrencyCollection in proposed outbound message";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 37;
|
||||
return check_skip_invalid(37);
|
||||
}
|
||||
if (info.ihr_disabled) {
|
||||
// if IHR is disabled, IHR fees will be always zero
|
||||
|
@ -2502,7 +2558,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
LOG(DEBUG)
|
||||
<< "not enough value to transfer with the message: all of the inbound message value has been consumed";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 37;
|
||||
return check_skip_invalid(37);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2518,7 +2574,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
LOG(DEBUG) << "not enough value attached to the message to pay forwarding fees : have " << req.grams << ", need "
|
||||
<< fees_total;
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 37; // not enough grams
|
||||
return check_skip_invalid(37); // not enough grams
|
||||
} else {
|
||||
// decrease message value
|
||||
req.grams -= fees_total;
|
||||
|
@ -2529,7 +2585,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
LOG(DEBUG) << "not enough grams to transfer with the message : remaining balance is "
|
||||
<< ap.remaining_balance.to_str() << ", need " << req_grams_brutto << " (including forwarding fees)";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 37; // not enough grams
|
||||
return check_skip_invalid(37); // not enough grams
|
||||
}
|
||||
|
||||
Ref<vm::Cell> new_extra;
|
||||
|
@ -2539,7 +2595,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
<< block::CurrencyCollection{0, req.extra}.to_str() << " required, only "
|
||||
<< block::CurrencyCollection{0, ap.remaining_balance.extra}.to_str() << " available";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 38; // not enough (extra) funds
|
||||
return check_skip_invalid(38); // not enough (extra) funds
|
||||
}
|
||||
if (ap.remaining_balance.extra.not_null() || req.extra.not_null()) {
|
||||
LOG(DEBUG) << "subtracting extra currencies: "
|
||||
|
@ -2563,7 +2619,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
LOG(DEBUG) << "outbound message does not fit into a cell after rewriting";
|
||||
if (redoing == 2) {
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 39;
|
||||
return check_skip_invalid(39);
|
||||
}
|
||||
return -2;
|
||||
}
|
||||
|
@ -2588,7 +2644,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
if (ap.remaining_balance.grams < fwd_fee) {
|
||||
LOG(DEBUG) << "not enough funds to pay for an outbound external message";
|
||||
collect_fine();
|
||||
return skip_invalid ? 0 : 37; // not enough grams
|
||||
return check_skip_invalid(37); // not enough grams
|
||||
}
|
||||
// repack message
|
||||
// ext_out_msg_info$11 constructor of CommonMsgInfo
|
||||
|
@ -2603,7 +2659,7 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap,
|
|||
LOG(DEBUG) << "outbound message does not fit into a cell after rewriting";
|
||||
if (redoing == 2) {
|
||||
collect_fine();
|
||||
return (skip_invalid ? 0 : 39);
|
||||
return check_skip_invalid(39);
|
||||
}
|
||||
return -2;
|
||||
}
|
||||
|
@ -3514,7 +3570,7 @@ LtCellRef Transaction::extract_out_msg(unsigned i) {
|
|||
* @returns A triple of the logical time, the extracted output message and the transaction root.
|
||||
*/
|
||||
NewOutMsg Transaction::extract_out_msg_ext(unsigned i) {
|
||||
return {start_lt + i + 1, std::move(out_msgs.at(i)), root};
|
||||
return {start_lt + i + 1, std::move(out_msgs.at(i)), root, i};
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -3684,6 +3740,7 @@ td::Status FetchConfigParams::fetch_config_params(
|
|||
compute_phase_cfg->suspended_addresses = config.get_suspended_addresses(now);
|
||||
compute_phase_cfg->size_limits = size_limits;
|
||||
compute_phase_cfg->precompiled_contracts = config.get_precompiled_contracts_config();
|
||||
compute_phase_cfg->allow_external_unfreeze = compute_phase_cfg->global_version >= 8;
|
||||
}
|
||||
{
|
||||
// compute action_phase_cfg
|
||||
|
@ -3707,6 +3764,8 @@ td::Status FetchConfigParams::fetch_config_params(
|
|||
action_phase_cfg->size_limits = size_limits;
|
||||
action_phase_cfg->action_fine_enabled = config.get_global_version() >= 4;
|
||||
action_phase_cfg->bounce_on_fail_enabled = config.get_global_version() >= 4;
|
||||
action_phase_cfg->message_skip_enabled = config.get_global_version() >= 8;
|
||||
action_phase_cfg->disable_custom_fess = config.get_global_version() >= 8;
|
||||
action_phase_cfg->mc_blackhole_addr = config.get_burning_config().blackhole_addr;
|
||||
}
|
||||
{
|
||||
|
|
|
@ -66,8 +66,11 @@ struct NewOutMsg {
|
|||
ton::LogicalTime lt;
|
||||
Ref<vm::Cell> msg;
|
||||
Ref<vm::Cell> trans;
|
||||
NewOutMsg(ton::LogicalTime _lt, Ref<vm::Cell> _msg, Ref<vm::Cell> _trans)
|
||||
: lt(_lt), msg(std::move(_msg)), trans(std::move(_trans)) {
|
||||
unsigned msg_idx;
|
||||
td::optional<MsgMetadata> metadata;
|
||||
td::Ref<vm::Cell> msg_env_from_dispatch_queue; // Not null if from dispatch queue; in this case lt is emitted_lt
|
||||
NewOutMsg(ton::LogicalTime _lt, Ref<vm::Cell> _msg, Ref<vm::Cell> _trans, unsigned _msg_idx)
|
||||
: lt(_lt), msg(std::move(_msg)), trans(std::move(_trans)), msg_idx(_msg_idx) {
|
||||
}
|
||||
bool operator<(const NewOutMsg& other) const& {
|
||||
return lt < other.lt || (lt == other.lt && msg->get_hash() < other.msg->get_hash());
|
||||
|
@ -126,6 +129,7 @@ struct ComputePhaseConfig {
|
|||
bool stop_on_accept_message = false;
|
||||
PrecompiledContractsConfig precompiled_contracts;
|
||||
bool dont_run_precompiled_ = false;
|
||||
bool allow_external_unfreeze{false};
|
||||
|
||||
ComputePhaseConfig() : gas_price(0), gas_limit(0), special_gas_limit(0), gas_credit(0) {
|
||||
compute_threshold();
|
||||
|
@ -163,6 +167,8 @@ struct ActionPhaseConfig {
|
|||
const WorkchainSet* workchains{nullptr};
|
||||
bool action_fine_enabled{false};
|
||||
bool bounce_on_fail_enabled{false};
|
||||
bool message_skip_enabled{false};
|
||||
bool disable_custom_fess{false};
|
||||
td::optional<td::Bits256> mc_blackhole_addr;
|
||||
const MsgPrices& fetch_msg_prices(bool is_masterchain) const {
|
||||
return is_masterchain ? fwd_mc : fwd_std;
|
||||
|
|
|
@ -640,11 +640,11 @@ long parse_bitstring_hex_literal(unsigned char* buff, std::size_t buff_size, con
|
|||
return bits;
|
||||
}
|
||||
|
||||
long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size, const char* str, const char* str_end) {
|
||||
long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size_bits, const char* str, const char* str_end) {
|
||||
const char* ptr = str;
|
||||
while (ptr < str_end && buff_size && (*ptr == '0' || *ptr == '1')) {
|
||||
while (ptr < str_end && buff_size_bits && (*ptr == '0' || *ptr == '1')) {
|
||||
*buff++ = (bool)(*ptr++ & 1);
|
||||
--buff_size;
|
||||
--buff_size_bits;
|
||||
}
|
||||
return td::narrow_cast<long>(ptr == str_end ? ptr - str : str - ptr - 1);
|
||||
}
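The renamed parameter documents the actual contract: the binary-literal parser is bounded in bits while the hex-literal parser remains bounded in bytes, which is why the updated call sites pass sizeof(buff) * 8 for b{...} input. A hedged sketch of the two calls side by side (the include path and the wrapper names are assumptions):

#include "common/bitstring.h"  // assumed include path for the td::bitstring parsers

// x{...} literals: the buffer bound is given in bytes.
long parse_hex_literal(const char* s, const char* s_end) {
  unsigned char buff[128];
  return td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), s, s_end);
}

// b{...} literals: the buffer bound is given in bits.
long parse_binary_literal(const char* s, const char* s_end) {
  unsigned char buff[128];
  return td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, s, s_end);
}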
|
||||
|
|
|
@ -58,7 +58,7 @@ unsigned long long bits_load_long_top(ConstBitPtr from, unsigned top_bits);
|
|||
long long bits_load_long(ConstBitPtr from, unsigned bits);
|
||||
unsigned long long bits_load_ulong(ConstBitPtr from, unsigned bits);
|
||||
long parse_bitstring_hex_literal(unsigned char* buff, std::size_t buff_size, const char* str, const char* str_end);
|
||||
long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size, const char* str, const char* str_end);
|
||||
long parse_bitstring_binary_literal(BitPtr buff, std::size_t buff_size_bits, const char* str, const char* str_end);
|
||||
|
||||
void bits_sha256(BitPtr to, ConstBitPtr from, std::size_t size);
|
||||
|
||||
|
|
|
@ -49,7 +49,6 @@ td::Result<int> Fift::interpret_istream(std::istream& stream, std::string curren
|
|||
}
|
||||
|
||||
td::Result<int> Fift::do_interpret(IntCtx& ctx, bool is_interactive) {
|
||||
ctx.ton_db = &config_.ton_db;
|
||||
ctx.source_lookup = &config_.source_lookup;
|
||||
ctx.dictionary = ctx.main_dictionary = ctx.context = config_.dictionary;
|
||||
ctx.output_stream = config_.output_stream;
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#pragma once
|
||||
|
||||
#include "SourceLookup.h"
|
||||
#include "vm/db/TonDb.h"
|
||||
#include "Dictionary.h"
|
||||
|
||||
#include "td/utils/Status.h"
|
||||
|
@ -31,13 +30,11 @@ struct Fift {
|
|||
public:
|
||||
struct Config {
|
||||
fift::SourceLookup source_lookup;
|
||||
vm::TonDb ton_db;
|
||||
fift::Dictionary dictionary;
|
||||
std::ostream* output_stream{&std::cout};
|
||||
std::ostream* error_stream{&std::cerr};
|
||||
bool show_backtrace{true};
|
||||
};
|
||||
// Fift must own ton_db and dictionary, no concurrent access is allowed
|
||||
explicit Fift(Config config);
|
||||
|
||||
td::Result<int> interpret_file(std::string fname, std::string current_dir, bool interactive = false);
|
||||
|
|
|
@ -46,8 +46,6 @@
|
|||
#include "SourceLookup.h"
|
||||
#include "words.h"
|
||||
|
||||
#include "vm/db/TonDb.h"
|
||||
|
||||
#include "td/utils/logging.h"
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/Parser.h"
|
||||
|
@ -65,7 +63,6 @@ void usage(const char* progname) {
|
|||
"\t-I<source-search-path>\tSets colon-separated (unix) or at-separated (windows) library source include path. If not indicated, "
|
||||
"$FIFTPATH is used instead.\n"
|
||||
"\t-L<library-fif-file>\tPre-loads a library source file\n"
|
||||
"\t-d<ton-db-path>\tUse a ton database\n"
|
||||
"\t-s\tScript mode: use first argument as a fift source file and import remaining arguments as $n)\n"
|
||||
"\t-v<verbosity-level>\tSet verbosity level\n"
|
||||
"\t-V<version>\tShow fift build information\n";
|
||||
|
@ -94,13 +91,12 @@ int main(int argc, char* const argv[]) {
|
|||
bool script_mode = false;
|
||||
std::vector<std::string> library_source_files, source_list;
|
||||
std::vector<std::string> source_include_path;
|
||||
std::string ton_db_path;
|
||||
|
||||
fift::Fift::Config config;
|
||||
|
||||
int i;
|
||||
int new_verbosity_level = VERBOSITY_NAME(INFO);
|
||||
while (!script_mode && (i = getopt(argc, argv, "hinI:L:d:sv:V")) != -1) {
|
||||
while (!script_mode && (i = getopt(argc, argv, "hinI:L:sv:V")) != -1) {
|
||||
switch (i) {
|
||||
case 'i':
|
||||
interactive = true;
|
||||
|
@ -115,9 +111,6 @@ int main(int argc, char* const argv[]) {
|
|||
case 'L':
|
||||
library_source_files.emplace_back(optarg);
|
||||
break;
|
||||
case 'd':
|
||||
ton_db_path = optarg;
|
||||
break;
|
||||
case 's':
|
||||
script_mode = true;
|
||||
break;
|
||||
|
@ -158,16 +151,6 @@ int main(int argc, char* const argv[]) {
|
|||
config.source_lookup.add_include_path(path);
|
||||
}
|
||||
|
||||
if (!ton_db_path.empty()) {
|
||||
auto r_ton_db = vm::TonDbImpl::open(ton_db_path);
|
||||
if (r_ton_db.is_error()) {
|
||||
LOG(ERROR) << "Error opening ton database: " << r_ton_db.error().to_string();
|
||||
std::exit(2);
|
||||
}
|
||||
config.ton_db = r_ton_db.move_as_ok();
|
||||
// FIXME //std::atexit([&] { config.ton_db.reset(); });
|
||||
}
|
||||
|
||||
fift::init_words_common(config.dictionary);
|
||||
fift::init_words_vm(config.dictionary, true); // enable vm debug
|
||||
fift::init_words_ton(config.dictionary);
|
||||
|
|
|
@ -22,6 +22,8 @@
|
|||
#include "td/utils/filesystem.h"
|
||||
#include "td/utils/misc.h"
|
||||
#include "td/utils/port/path.h"
|
||||
#include "vm/boc.h"
|
||||
#include <map>
|
||||
|
||||
namespace fift {
|
||||
namespace {
|
||||
|
|
|
@ -43,8 +43,6 @@
|
|||
#include "vm/box.hpp"
|
||||
#include "vm/atom.h"
|
||||
|
||||
#include "vm/db/TonDb.h" // only for interpret_db_run_vm{,_parallel}
|
||||
|
||||
#include "block/block.h"
|
||||
#include "common/global-version.h"
|
||||
|
||||
|
@ -2077,23 +2075,23 @@ void interpret_bitstring_hex_literal(IntCtx& ctx) {
|
|||
auto s = ctx.parser->scan_word_to('}');
|
||||
unsigned char buff[128];
|
||||
int bits = (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), s.begin(), s.end());
|
||||
if (bits < 0) {
|
||||
vm::CellBuilder cb;
|
||||
if (bits < 0 || !cb.store_bits_bool(td::ConstBitPtr{buff}, bits)) {
|
||||
throw IntError{"Invalid hex bitstring constant"};
|
||||
}
|
||||
auto cs = Ref<vm::CellSlice>{true, vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize()};
|
||||
ctx.stack.push(std::move(cs));
|
||||
ctx.stack.push(cb.as_cellslice_ref());
|
||||
push_argcount(ctx, 1);
|
||||
}
|
||||
|
||||
void interpret_bitstring_binary_literal(IntCtx& ctx) {
|
||||
auto s = ctx.parser->scan_word_to('}');
|
||||
unsigned char buff[128];
|
||||
int bits = (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff), s.begin(), s.end());
|
||||
if (bits < 0) {
|
||||
int bits = (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, s.begin(), s.end());
|
||||
vm::CellBuilder cb;
|
||||
if (bits < 0 || !cb.store_bits_bool(td::ConstBitPtr{buff}, bits)) {
|
||||
throw IntError{"Invalid binary bitstring constant"};
|
||||
}
|
||||
auto cs = Ref<vm::CellSlice>{true, vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize()};
|
||||
ctx.stack.push(std::move(cs));
|
||||
ctx.stack.push(cb.as_cellslice_ref());
|
||||
push_argcount(ctx, 1);
|
||||
}
|
||||
|
||||
|
@ -2721,114 +2719,6 @@ void interpret_vmop_dump(vm::Stack& stack) {
|
|||
stack.push_string(std::move(dump));
|
||||
}
|
||||
|
||||
void do_interpret_db_run_vm_parallel(std::ostream* stream, vm::Stack& stack, vm::TonDb* ton_db_ptr, int threads_n,
|
||||
int tasks_n) {
|
||||
if (!ton_db_ptr || !*ton_db_ptr) {
|
||||
throw vm::VmError{vm::Excno::fatal, "Ton database is not available"};
|
||||
}
|
||||
auto& ton_db = *ton_db_ptr;
|
||||
auto txn = ton_db->begin_transaction();
|
||||
auto txn_abort = td::ScopeExit() + [&] { ton_db->abort_transaction(std::move(txn)); };
|
||||
|
||||
struct Task {
|
||||
vm::Ref<vm::CellSlice> code;
|
||||
vm::SmartContractDb smart;
|
||||
td::optional<vm::SmartContractDiff> diff;
|
||||
td::unique_ptr<td::Guard> guard;
|
||||
Ref<vm::Stack> stack;
|
||||
int res{0};
|
||||
Ref<vm::Cell> data;
|
||||
std::string log;
|
||||
};
|
||||
std::vector<Task> tasks(tasks_n);
|
||||
std::vector<td::thread> threads(threads_n);
|
||||
|
||||
for (auto& task : tasks) {
|
||||
task.code = stack.pop_cellslice();
|
||||
auto smart_hash = td::serialize(stack.pop_smallint_range(1000000000));
|
||||
task.smart = txn->begin_smartcontract(smart_hash);
|
||||
task.guard = td::create_lambda_guard([&] { txn->abort_smartcontract(std::move(task.smart)); });
|
||||
auto argsn = stack.pop_smallint_range(100);
|
||||
task.stack = stack.split_top(argsn);
|
||||
}
|
||||
|
||||
std::atomic<int> next_task_i{0};
|
||||
auto run_tasks = [&] {
|
||||
while (true) {
|
||||
auto task_i = next_task_i++;
|
||||
if (task_i >= tasks_n) {
|
||||
break;
|
||||
}
|
||||
auto& task = tasks[task_i];
|
||||
auto data = task.smart->get_root();
|
||||
|
||||
StringLogger logger;
|
||||
vm::VmLog log = create_vm_log(stream ? &logger : nullptr);
|
||||
|
||||
task.res = vm::run_vm_code(task.code, task.stack, 3, &data, std::move(log));
|
||||
task.smart->set_root(data);
|
||||
task.diff = vm::SmartContractDiff(std::move(task.smart));
|
||||
task.data = std::move(data);
|
||||
task.log = std::move(logger.res);
|
||||
}
|
||||
};
|
||||
|
||||
td::Timer timer;
|
||||
for (auto& thread : threads) {
|
||||
thread = td::thread(run_tasks);
|
||||
}
|
||||
run_tasks();
|
||||
for (auto& thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
|
||||
if (stream) {
|
||||
int id = 0;
|
||||
for (auto& task : tasks) {
|
||||
id++;
|
||||
*stream << "Task #" << id << " vm_log begin" << std::endl;
|
||||
*stream << task.log;
|
||||
*stream << "Task #" << id << " vm_log end" << std::endl;
|
||||
}
|
||||
}
|
||||
|
||||
LOG(ERROR) << timer;
|
||||
timer = {};
|
||||
|
||||
for (auto& task : tasks) {
|
||||
auto retn = task.stack.write().pop_smallint_range(100, -1);
|
||||
if (retn == -1) {
|
||||
retn = task.stack->depth();
|
||||
}
|
||||
stack.push_from_stack(std::move(*task.stack), retn);
|
||||
stack.push_smallint(task.res);
|
||||
stack.push_cell(std::move(task.data));
|
||||
task.guard->dismiss();
|
||||
if (task.diff) {
|
||||
txn->commit_smartcontract(std::move(task.diff.value()));
|
||||
} else {
|
||||
txn->commit_smartcontract(std::move(task.smart));
|
||||
}
|
||||
}
|
||||
LOG(ERROR) << timer;
|
||||
timer = {};
|
||||
|
||||
txn_abort.dismiss();
|
||||
ton_db->commit_transaction(std::move(txn));
|
||||
timer = {};
|
||||
LOG(INFO) << "TonDB stats: \n" << ton_db->stats();
|
||||
}
|
||||
|
||||
void interpret_db_run_vm(IntCtx& ctx) {
|
||||
do_interpret_db_run_vm_parallel(ctx.error_stream, ctx.stack, ctx.ton_db, 0, 1);
|
||||
}
|
||||
|
||||
void interpret_db_run_vm_parallel(IntCtx& ctx) {
|
||||
auto threads_n = ctx.stack.pop_smallint_range(32, 0);
|
||||
auto tasks_n = ctx.stack.pop_smallint_range(1000000000);
|
||||
do_interpret_db_run_vm_parallel(ctx.error_stream, ctx.stack, ctx.ton_db, threads_n, tasks_n);
|
||||
}
|
||||
|
||||
void interpret_store_vm_cont(vm::Stack& stack) {
|
||||
auto vmcont = stack.pop_cont();
|
||||
auto cb = stack.pop_builder();
|
||||
|
@ -3518,8 +3408,6 @@ void init_words_vm(Dictionary& d, bool enable_debug) {
|
|||
// d.def_ctx_word("runvmcode ", std::bind(interpret_run_vm, _1, 0x40));
|
||||
// d.def_ctx_word("runvm ", std::bind(interpret_run_vm, _1, 0x45));
|
||||
d.def_ctx_word("runvmx ", std::bind(interpret_run_vm, _1, -1));
|
||||
d.def_ctx_word("dbrunvm ", interpret_db_run_vm);
|
||||
d.def_ctx_word("dbrunvm-parallel ", interpret_db_run_vm_parallel);
|
||||
d.def_stack_word("vmcont, ", interpret_store_vm_cont);
|
||||
d.def_stack_word("vmcont@ ", interpret_fetch_vm_cont);
|
||||
d.def_stack_word("(vmoplen) ", interpret_vmop_len);
|
||||
|
|
|
@ -17,6 +17,7 @@
|
|||
Copyright 2017-2020 Telegram Systems LLP
|
||||
*/
|
||||
#include "func.h"
|
||||
#include "vm/boc.h"
|
||||
|
||||
namespace funC {
|
||||
|
||||
|
|
|
@ -33,6 +33,7 @@
|
|||
#include "td/utils/Status.h"
|
||||
#include <sstream>
|
||||
#include <iomanip>
|
||||
#include "vm/boc.h"
|
||||
|
||||
td::Result<std::string> compile_internal(char *config_json) {
|
||||
TRY_RESULT(input_json, td::json_decode(td::MutableSlice(config_json)))
|
||||
|
|
|
@@ -244,7 +244,7 @@ cont bless(slice s) impure asm "BLESS";
;;; In other words, the current smart contract agrees to buy some gas to finish the current transaction.
;;; This action is required to process external messages, which bring no value (hence no gas) with themselves.
;;;
;;; For more details check [accept_message effects](https://ton.org/docs/#/smart-contracts/accept).
;;; For more details check [accept_message effects](https://docs.ton.org/develop/smart-contracts/guidelines/accept).
() accept_message() impure asm "ACCEPT";

;;; Sets current gas limit `gl` to the minimum of limit and `gm`, and resets the gas credit `gc` to zero.

@@ -282,10 +282,10 @@ int abs(int x) asm "ABS";

  It is said that a primitive _loads_ some data,
  if it returns the data and the remainder of the slice
  (so it can also be used as [modifying method](https://ton.org/docs/#/func/statements?id=modifying-methods)).
  (so it can also be used as [modifying method](https://docs.ton.org/develop/func/statements#modifying-methods)).

  It is said that a primitive _preloads_ some data, if it returns only the data
  (it can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods)).
  (it can be used as [non-modifying method](https://docs.ton.org/develop/func/statements#non-modifying-methods)).

  Unless otherwise stated, loading and preloading primitives read the data from a prefix of the slice.
-}

@@ -416,7 +416,7 @@ int builder_depth(builder b) asm "BDEPTH";
  # Builder primitives
  It is said that a primitive _stores_ a value `x` into a builder `b`
  if it returns a modified version of the builder `b'` with the value `x` stored at the end of it.
  It can be used as [non-modifying method](https://ton.org/docs/#/func/statements?id=non-modifying-methods).
  It can be used as [non-modifying method](https://docs.ton.org/develop/func/statements#non-modifying-methods).

  All the primitives below first check whether there is enough space in the `builder`,
  and only then check the range of the value being serialized.
|
||||
|
|
|
@ -105,7 +105,7 @@ td::Result<td::Ref<vm::Cell>> DnsInterface::EntryData::as_cell() const {
|
|||
return error;
|
||||
}
|
||||
if (res.is_null()) {
|
||||
return td::Status::Error("Entry data is emtpy");
|
||||
return td::Status::Error("Entry data is empty");
|
||||
}
|
||||
return res;
|
||||
//dns_text#1eda _:Text = DNSRecord;
|
||||
|
|
|
@ -1,102 +0,0 @@
|
|||
"Asm.fif" include
|
||||
|
||||
PROGRAM{
|
||||
|
||||
NEWPROC load_dict
|
||||
NEWPROC generate_dict
|
||||
NEWPROC save_dict
|
||||
|
||||
NEWPROC do_get
|
||||
NEWPROC do_set
|
||||
NEWPROC do_erase
|
||||
|
||||
main PROC:<{
|
||||
DUP 1 INT EQUAL IF:<{
|
||||
DROP
|
||||
do_get CALL
|
||||
}>ELSE<{
|
||||
DUP 2 INT EQUAL IF:<{
|
||||
DROP
|
||||
do_set CALL
|
||||
}>ELSE<{
|
||||
DUP 3 INT EQUAL IF:<{
|
||||
DROP
|
||||
do_erase CALL
|
||||
}> }> }>
|
||||
-1 INT
|
||||
}>
|
||||
|
||||
do_get PROC:<{
|
||||
load_dict CALL
|
||||
32 INT
|
||||
DICTIGET
|
||||
}>
|
||||
|
||||
do_set PROC:<{
|
||||
load_dict CALL
|
||||
32 INT
|
||||
DICTISET
|
||||
save_dict CALL
|
||||
}>
|
||||
|
||||
do_erase PROC:<{
|
||||
load_dict CALL
|
||||
32 INT
|
||||
DICTIDEL
|
||||
DROP
|
||||
save_dict CALL
|
||||
}>
|
||||
|
||||
generate_dict PROC:<{
|
||||
4 INT 100 INT REPEAT:<{
|
||||
DUP 2DUP MUL ROT 617 INT ADD 1000 INT MOD
|
||||
}>
|
||||
DROP 100 INT
|
||||
NEWDICT
|
||||
SWAP REPEAT:<{
|
||||
s0 s2 XCHG
|
||||
NEWC
|
||||
16 STU
|
||||
s0 s2 XCHG
|
||||
32 INT
|
||||
DICTISETB
|
||||
}>
|
||||
}>
|
||||
|
||||
load_dict PROC:<{
|
||||
PUSHROOT
|
||||
CTOS DUP SEMPTY IF:<{
|
||||
DROP
|
||||
generate_dict CALL
|
||||
}>
|
||||
}>
|
||||
|
||||
save_dict PROC:<{
|
||||
NEWC
|
||||
STSLICE
|
||||
ENDC
|
||||
POPROOT
|
||||
}>
|
||||
|
||||
}END>s constant pmc_prog
|
||||
|
||||
{ 1 2 rot pmc_prog } : task_pmc_get
|
||||
{ 2 3 rot pmc_prog } : task_pmc_set
|
||||
{ 3 2 rot pmc_prog } : task_pmc_erase
|
||||
|
||||
{ task_pmc_get dbrunvm 2drop } : pmc_get
|
||||
{ task_pmc_set dbrunvm 2drop } : pmc_set
|
||||
{ task_pmc_erase dbrunvm 2drop } : pmc_erase
|
||||
|
||||
<b x{abacaba} s, <b x{dead} s, x{1dead} sr, b> <s sr, b> <s constant test_value
|
||||
|
||||
// 123 "a" pmc_get
|
||||
// { csr. } if
|
||||
// 123 "a" pmc_set
|
||||
|
||||
// test_value 123 x{a} task_pmc_set test_value 123 x{b} task_pmc_set test_value 123 x{c} task_pmc_set 3 3 dbrunvm-parallel
|
||||
|
||||
{ test_value 150 rot task_pmc_set } 0 { 1 + 2dup swap execute 7 roll 7 roll } 10000 times 2drop 10000 4 dbrunvm-parallel
|
||||
|
||||
// 123 "a" task_pmc_get 123 "b" task_pmc_get 123 "c" task_pmc_get 3 dbrunvm-parallel
|
||||
|
|
@ -113,6 +113,9 @@ td::Result<Ref<DataCell>> DataCell::create(td::ConstBitPtr data, unsigned bits,
|
|||
if (bits != 8 + hash_bytes * 8) {
|
||||
return td::Status::Error("Not enouch data for a Library special cell");
|
||||
}
|
||||
if (!refs.empty()) {
|
||||
return td::Status::Error("Library special cell has a cell reference");
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ namespace vm {
|
|||
struct VmLog {
|
||||
td::LogInterface *log_interface{td::log_interface};
|
||||
td::LogOptions log_options{td::log_options};
|
||||
enum { DumpStack = 2, ExecLocation = 4, GasRemaining = 8, DumpStackVerbose = 16 };
|
||||
enum { DumpStack = 2, ExecLocation = 4, GasRemaining = 8, DumpStackVerbose = 16, DumpC5 = 32 };
|
||||
int log_mask{1};
|
||||
static VmLog Null() {
|
||||
VmLog res;
|
||||
|
|
|
@ -83,6 +83,14 @@ std::string StackEntry::to_lisp_string() const {
|
|||
return std::move(os).str();
|
||||
}
|
||||
|
||||
static std::string cell_to_hex(const td::Ref<vm::Cell> &cell) {
|
||||
auto boc = vm::std_boc_serialize(cell);
|
||||
if (boc.is_ok()) {
|
||||
return td::buffer_to_hex(boc.move_as_ok().as_slice());
|
||||
}
|
||||
return "???";
|
||||
}
|
||||
|
||||
void StackEntry::dump(std::ostream& os, bool verbose) const {
|
||||
switch (tp) {
|
||||
case t_null:
|
||||
|
@ -94,12 +102,7 @@ void StackEntry::dump(std::ostream& os, bool verbose) const {
|
|||
case t_cell:
|
||||
if (ref.not_null()) {
|
||||
if (verbose) {
|
||||
std::string serialized = "???";
|
||||
auto boc = vm::std_boc_serialize(as_cell());
|
||||
if (boc.is_ok()) {
|
||||
serialized = td::buffer_to_hex(boc.move_as_ok().as_slice());
|
||||
}
|
||||
os << "C{" << serialized << "}";
|
||||
os << "C{" << cell_to_hex(as_cell()) << "}";
|
||||
} else {
|
||||
os << "C{" << *as_cell() << "}";
|
||||
}
|
||||
|
@ -109,7 +112,12 @@ void StackEntry::dump(std::ostream& os, bool verbose) const {
|
|||
break;
|
||||
case t_builder:
|
||||
if (ref.not_null()) {
|
||||
os << "BC{" << *as_builder() << "}";
|
||||
if (verbose) {
|
||||
Ref<CellBuilder> cb = as_builder();
|
||||
os << "BC{" << cell_to_hex(cb.write().finalize_novm()) << "}";
|
||||
} else {
|
||||
os << "BC{" << *as_builder() << "}";
|
||||
}
|
||||
} else {
|
||||
os << "BC{null}";
|
||||
}
|
||||
|
@ -117,7 +125,13 @@ void StackEntry::dump(std::ostream& os, bool verbose) const {
|
|||
case t_slice: {
|
||||
if (ref.not_null()) {
|
||||
os << "CS{";
|
||||
static_cast<Ref<CellSlice>>(ref)->dump(os, 1, false);
|
||||
if (verbose) {
|
||||
CellBuilder cb;
|
||||
cb.append_cellslice(as_slice());
|
||||
os << cell_to_hex(cb.finalize_novm());
|
||||
} else {
|
||||
static_cast<Ref<CellSlice>>(ref)->dump(os, 1, false);
|
||||
}
|
||||
os << '}';
|
||||
} else {
|
||||
os << "CS{null}";
|
||||
|
|
|
@ -96,10 +96,10 @@ td::Result<vm::StackEntry> convert_stack_entry(td::Slice str) {
|
|||
}
|
||||
if (l >= 3 && (str[0] == 'x' || str[0] == 'b') && str[1] == '{' && str.back() == '}') {
|
||||
unsigned char buff[128];
|
||||
int bits =
|
||||
(str[0] == 'x')
|
||||
? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1)
|
||||
: (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1);
|
||||
int bits = (str[0] == 'x')
|
||||
? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1)
|
||||
: (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8, str.begin() + 2,
|
||||
str.end() - 1);
|
||||
if (bits < 0) {
|
||||
return td::Status::Error("failed to parse raw b{...}/x{...} number");
|
||||
}
|
||||
|
|
|
@ -441,10 +441,16 @@ int VmState::step() {
|
|||
if (log.log_mask & vm::VmLog::DumpStackVerbose) {
|
||||
mode += 4;
|
||||
}
|
||||
std::unique_ptr<VmStateInterface> tmp_ctx;
|
||||
// install temporary dummy vm state interface to prevent charging for cell load operations during dump
|
||||
VmStateInterface::Guard guard(tmp_ctx.get());
|
||||
stack->dump(ss, mode);
|
||||
VM_LOG(this) << "stack:" << ss.str();
|
||||
}
|
||||
if (stack_trace) {
|
||||
std::unique_ptr<VmStateInterface> tmp_ctx;
|
||||
// install temporary dummy vm state interface to prevent charging for cell load operations during dump
|
||||
VmStateInterface::Guard guard(tmp_ctx.get());
|
||||
stack->dump(std::cerr, 3);
|
||||
}
|
||||
++steps;
|
||||
|
@ -523,6 +529,13 @@ int VmState::run() {
|
|||
res = vmoog.get_errno(); // no ~ for unhandled exceptions (to make their faking impossible)
|
||||
}
|
||||
if (!parent) {
|
||||
if ((log.log_mask & VmLog::DumpC5) && cstate.committed) {
|
||||
std::stringstream ss;
|
||||
ss << "final c5: ";
|
||||
StackEntry::maybe<Cell>(cstate.c5).dump(ss, true);
|
||||
ss << "\n";
|
||||
VM_LOG(this) << ss.str();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
restore_parent = true;
|
||||
|
|
|
@ -96,4 +96,17 @@ Operations for working with Merkle proofs, where cells can have non-zero level a
|
|||
|
||||
### Other changes
|
||||
* `GLOBALID` gets `ConfigParam 19` from the tuple, not from the config dict. This decreases gas usage.
|
||||
* `SENDMSG` gets `ConfigParam 24/25` (message prices) from the tuple, not from the config dict, and also uses `ConfigParam 43` to get max_msg_cells.
|
||||
|
||||
|
||||
## Version 7
|
||||
|
||||
[Explicitly nullify](https://github.com/ton-blockchain/ton/pull/957/files) `due_payment` after due reimbursement.
|
||||
|
||||
## Version 8
|
||||
|
||||
- Check mode on invalid `action_send_msg`. Ignore action if `IGNORE_ERROR` (+2) bit is set, bounce if `BOUNCE_ON_FAIL` (+16) bit is set.
|
||||
- Slightly change random seed generation to fix mix of `addr_rewrite` and `addr`.
|
||||
- Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent.
|
||||
- Allow unfreeze through external messages.
|
||||
- Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages.
|
545
docker/README.md
|
@ -1,28 +1,525 @@
|
|||
# The Open Network Node
|
||||
Dockerfile for The Open Network Node
|
||||
# Official TON Docker image
|
||||
|
||||
1. [Dockerfile](#docker)
|
||||
2. [Kubernetes deployment on-premises](#deploy-on-premises-with-metallb-load-balancer-)
|
||||
3. [Kubernetes deployment on AWS](#deploy-on-aws-cloud-amazon-web-services)
|
||||
4. [Kubernetes deployment on GCP](#deploy-on-gcp-google-cloud-platform)
|
||||
5. [Kubernetes deployment on AliCloud](#deploy-on-ali-cloud)
|
||||
6. [Troubleshooting](#troubleshooting)
|
||||
## Prerequisites
|
||||
|
||||
The TON node, whether it is a validator or a fullnode, requires a public IP address.
|
||||
If your server is located within an internal network or a Kubernetes cluster, make sure that the required ports are reachable from the outside.
|
||||
|
||||
Also pay attention to the [hardware requirements](https://docs.ton.org/participate/run-nodes/full-node) for TON fullnodes and validators. The Pods and StatefulSets in this guide assume these requirements.
|
||||
|
||||
It is recommended to read the Docker chapter first in order to get a better understanding of the TON Docker image and its parameters.
|
||||
|
||||
## Docker
|
||||
|
||||
### Installation
|
||||
```docker pull ghcr.io/ton-blockchain/ton:latest```
|
||||
|
||||
### Configuration
|
||||
TON validator-engine supports a number of command-line parameters;
|
||||
these parameters can be passed to the container via environment variables.
|
||||
Below is the list of supported arguments and their default values:
|
||||
|
||||
| Argument | Description | Mandatory? | Default value |
|
||||
|:------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:----------:|:-------------------------------------------------------:|
|
||||
| PUBLIC_IP          | The public IP address of your TON node. Normally it is the same as your server's external IP. It can also be the IP address of your proxy server or load balancer. | yes | |
|
||||
| GLOBAL_CONFIG_URL | TON global configuration file. Mainnet - https://ton.org/global-config.json, Testnet - https://ton.org/testnet-global.config.json | no | https://api.tontech.io/ton/wallet-mainnet.autoconf.json |
|
||||
| DUMP_URL | URL to TON dump. Specify dump from https://dump.ton.org. If you are using testnet dump, make sure to download global config for testnet. | no | |
|
||||
| VALIDATOR_PORT | UDP port that must be available from the outside. Used for communication with other nodes. | no | 30001 |
|
||||
| CONSOLE_PORT       | This TCP port is used to access the validator's console. It does not need to be open for external access. | no | 30002 |
|
||||
| LITE_PORT | Lite-server's TCP port. Used by lite-client. | no | 30003 |
|
||||
| LITESERVER         | true or false. Set to true if you want a lite-server up and running. | no | false |
|
||||
| STATE_TTL | Node's state will be gc'd after this time (in seconds). | no | 86400 |
|
||||
| ARCHIVE_TTL | Node's archived blocks will be deleted after this time (in seconds). | no | 86400 |
|
||||
| THREADS | Number of threads used by validator-engine. | no | 8 |
|
||||
| VERBOSITY | Verbosity level. | no | 3 |
|
||||
| CUSTOM_ARG         | validator-engine might have some undocumented arguments. This is reserved for test purposes.<br/>For example, you can pass **--logname /var/ton-work/log** in order to have log files. | no | |
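
For instance, CUSTOM_ARG can be combined with the other variables in a regular `docker run` invocation. A minimal sketch (the public IP is a placeholder, and `--logname` is just the example argument from the table above):
```
docker run -d --name ton-node -v /data/db:/var/ton-work/db \
  -e "PUBLIC_IP=<PUBLIC_IP>" \
  -e "CUSTOM_ARG=--logname /var/ton-work/log" \
  --network host \
  -it ghcr.io/ton-blockchain/ton
```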
|
||||
|
||||
### Run the node - the quick way
|
||||
The command below runs a Docker container with a TON node, which will start the synchronization process.
|
||||
|
||||
Notice the **--network host** option; it means that the Docker container will use the network namespace of the host machine.
|
||||
In this case there is no need to map ports between the host and the container. The container will use the same IP address and ports as the host.
|
||||
This approach simplifies the networking configuration of the container and is usually used on a dedicated server with an assigned public IP.
|
||||
|
||||
Keep in mind that this option can also introduce security concerns because the container has access to the host's network interfaces directly, which might not be desirable in a multi-tenant environment.
|
||||
|
||||
Check your firewall configuration and make sure that at least UDP port 43677 is publicly available.
|
||||
Find out your PUBLIC_IP:
|
||||
```
|
||||
curl -4 ifconfig.me
|
||||
```
|
||||
and replace it in the command below:
|
||||
```
|
||||
docker run -d --name ton-node -v /data/db:/var/ton-work/db \
|
||||
-e "PUBLIC_IP=<PUBLIC_IP>" \
|
||||
-e "LITESERVER=true" \
|
||||
-e "DUMP_URL=https://dump.ton.org/dumps/latest.tar.lz" \
|
||||
--network host \
|
||||
-it ghcr.io/ton-blockchain/ton
|
||||
```
|
||||
If you don't need Lite-server, then remove -e "LITESERVER=true".
|
||||
|
||||
### Run the node - isolated way
|
||||
In production environments it is recommended to use the **port mapping** feature of Docker's default bridge network.
|
||||
When you use port mapping, Docker allocates a specific port on the host to forward traffic to a port inside the container.
|
||||
This is ideal for running multiple containers with isolated networks on the same host.
|
||||
```
|
||||
docker run -d --name ton-node -v /data/db:/var/ton-work/db \
|
||||
-e "PUBLIC_IP=<PUBLIC_IP>" \
|
||||
-e "DUMP_URL=https://dump.ton.org/dumps/latest.tar.lz" \
|
||||
-e "VALIDATOR_PORT=443" \
|
||||
-e "CONSOLE_PORT=88" \
|
||||
-e "LITE_PORT=443" \
|
||||
-e "LITESERVER=true" \
|
||||
-p 443:443/udp \
|
||||
-p 88:88/tcp \
|
||||
-p 443:443/tcp \
|
||||
-it ghcr.io/ton-blockchain/ton
|
||||
```
|
||||
Adjust ports per your need.
|
||||
Check your firewall configuration and make sure that customized ports (443/udp, 88/tcp and 443/tcp in this example) are publicly available.
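
How you open these ports depends on your firewall; with **ufw**, for example, the rules for the customized ports above might look like this (a sketch, adapt to your environment):
```
sudo ufw allow 443/udp
sudo ufw allow 88/tcp
sudo ufw allow 443/tcp
```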
|
||||
|
||||
### Verify if TON node is operating correctly
|
||||
After executing the above command, check the log files:
|
||||
|
||||
```docker logs ton-node```
|
||||
|
||||
It is totally fine if, for some time (up to 15 minutes), you see messages like the following in the log output:
|
||||
|
||||
```log
|
||||
failed to download proof link: [Error : 651 : no nodes]
|
||||
```
|
||||
|
||||
After some time you should be able to see multiple messages similar to these below:
|
||||
```log
|
||||
failed to download key blocks: [Error : 652 : adnl query timeout]
|
||||
last key block is [ w=-1 s=9223372036854775808 seq=34879845 rcEsfLF3E80PqQPWesW+rlOY2EpXd5UDrW32SzRWgus= C1Hs+q2Vew+WxbGL6PU1P6R2iYUJVJs4032CTS/DQzI= ]
|
||||
getnextkey: [Error : 651 : not inited]
|
||||
downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=1442840576 (71MB/s)
|
||||
finished downloading state (-1,8000000000000000,38585739):9E86E166AE7E24BAA22762766381440C625F47E2B11D72967BB58CE8C90F7EBA:5BFFF759380097DF178325A7151E9C0571C4E452A621441A03A0CECAED970F57: total=4520747390
|
||||
getnextkey: [Error : 651 : not inited]
|
||||
getnextkey: [Error : 651 : not inited]
|
||||
```
|
||||
As you noticed, we have mounted a Docker volume to the local folder **/data/db**.
|
||||
Go inside this folder on your server and check if its size is growing (```sudo du -h .*```)
|
||||
|
||||
Now connect to the running container:
|
||||
```
|
||||
docker exec -ti ton-node /bin/bash
|
||||
```
|
||||
and try to connect and execute **getconfig** command via validator-engine-console:
|
||||
```
|
||||
validator-engine-console -k client -p server.pub -a localhost:$(jq .control[].port <<< cat /var/ton-work/db/config.json) -c getconfig
|
||||
```
|
||||
If you see a JSON output, the validator-engine is up. Now execute the **last** command with a lite-client:
|
||||
```
|
||||
lite-client -a localhost:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json) -p liteserver.pub -c last
|
||||
```
|
||||
if you see the following output:
|
||||
```
|
||||
conn ready
|
||||
failed query: [Error : 652 : adnl query timeout]
|
||||
cannot get server version and time (server too old?)
|
||||
server version is too old (at least 1.1 with capabilities 1 required), some queries are unavailable
|
||||
fatal error executing command-line queries, skipping the rest
|
||||
```
|
||||
it means that the lite-server is up, but the node is not synchronized yet.
|
||||
Once the node is synchronized, the output of **last** command will be similar to this one:
|
||||
|
||||
```
|
||||
conn ready
|
||||
server version is 1.1, capabilities 7
|
||||
server time is 1719306580 (delta 0)
|
||||
last masterchain block is (-1,8000000000000000,20435927):47A517265B25CE4F2C8B3058D46343C070A4B31C5C37745390CE916C7D1CE1C5:279F9AA88C8146257E6C9B537905238C26E37DC2E627F2B6F1D558CB29A6EC82
|
||||
server time is 1719306580 (delta 0)
|
||||
zerostate id set to -1:823F81F306FF02694F935CF5021548E3CE2B86B529812AF6A12148879E95A128:67E20AC184B9E039A62667ACC3F9C00F90F359A76738233379EFA47604980CE8
|
||||
```
|
||||
If you can't make it work, refer to the [Troubleshooting](#troubleshooting) section below.
|
||||
### Use validator-engine-console
|
||||
```docker exec -ti ton-node /bin/bash```
|
||||
|
||||
```validator-engine-console -k client -p server.pub -a 127.0.0.1:$(jq .control[].port <<< cat /var/ton-work/db/config.json)```
|
||||
|
||||
### Use lite-client
|
||||
```docker exec -ti ton-node /bin/bash```
|
||||
|
||||
```lite-client -p liteserver.pub -a 127.0.0.1:$(jq .liteservers[].port <<< cat /var/ton-work/db/config.json)```
|
||||
|
||||
If you use lite-client outside the Docker container, copy the **liteserver.pub** from the container:
|
||||
|
||||
```docker cp ton-node:/var/ton-work/db/liteserver.pub /your/path```
|
||||
|
||||
```lite-client -p /your/path/liteserver.pub -a <PUBLIC_IP>:<LITE_PORT>```
|
||||
|
||||
### Stop TON docker container
|
||||
```
|
||||
docker stop ton-node
|
||||
```
|
||||
|
||||
## Kubernetes
|
||||
### Deploy in a quick way (without load balancer)
|
||||
If the nodes within your kubernetes cluster have external IPs,
|
||||
make sure that the PUBLIC_IP used for validator-engine matches the node's external IP.
|
||||
If all Kubernetes nodes are inside a DMZ, skip this section.
|
||||
|
||||
#### Prepare
|
||||
If you are using the **flannel** network driver, you can find the node's IP this way:
|
||||
```yaml
|
||||
kubectl get nodes
|
||||
kubectl describe node <NODE_NAME> | grep public-ip
|
||||
```
|
||||
For the **calico** driver, use:
|
||||
```yaml
|
||||
kubectl describe node <NODE_NAME> | grep IPv4Address
|
||||
```
|
||||
Double check if your Kubernetes node's external IP coincides with the host's IP address:
|
||||
```
|
||||
kubectl run --image=ghcr.io/ton-blockchain/ton:latest validator-engine-pod --env="HOST_IP=1.1.1.1" --env="PUBLIC_IP=1.1.1.1"
|
||||
kubectl exec -it validator-engine-pod -- curl -4 ifconfig.me
|
||||
kubectl delete pod validator-engine-pod
|
||||
```
|
||||
If IPs do not match, refer to the sections where load balancers are used.
|
||||
|
||||
Now do the following:
|
||||
* Add a label to this particular node. By this label our pod will know where to be deployed and what storage to use:
|
||||
|
||||
```
|
||||
kubectl label nodes <NODE_NAME> node_type=ton-validator
|
||||
```
|
||||
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-node-port.yaml](ton-node-port.yaml).
|
||||
* Replace **<LOCAL_STORAGE_PATH>** with a real path on host for Persistent Volume.
|
||||
* If you change the ports, make sure you specify appropriate env vars in Pod section.
|
||||
* If you want to use dynamic storage provisioning via volumeClaimTemplates, feel free to create your own StorageClass.
|
||||
|
||||
#### Install
|
||||
```docker pull ghcr.io/ton-blockchain/ton:latest```
|
||||
#### Create volume
|
||||
```docker volume create ton-db```
|
||||
#### Run
|
||||
```docker run -d --name ton-node --mount source=ton-db,target=/var/ton-work/db --network host -e "PUBLIC_IP=<YOUR_PUBLIC_IP>" -e "CONSOLE_PORT=<TCP-PORT1>" -e "LITESERVER=true" -e "LITE_PORT=<TCP-PORT2>" -it ghcr.io/ton-blockchain/ton```
|
||||
```yaml
|
||||
kubectl apply -f ton-node-port.yaml
|
||||
```
|
||||
|
||||
This deployment uses the host's network stack (**hostNetwork: true**) and a service of type **NodePort**.
|
||||
You can also use a service of type **LoadBalancer**.
|
||||
This way the service will get a public IP assigned to the endpoints.
|
||||
|
||||
#### Verify installation
|
||||
See if service endpoints were correctly created:
|
||||
|
||||
```yaml
|
||||
kubectl get endpoints
|
||||
|
||||
NAME ENDPOINTS
|
||||
validator-engine-srv <PUBLIC_IP>:30002,<PUBLIC_IP>:30001,<PUBLIC_IP>:30003
|
||||
```
|
||||
Check the logs for the deployment status:
|
||||
```yaml
|
||||
kubectl logs validator-engine-pod
|
||||
```
|
||||
or go inside the pod and check if blockchain size is growing:
|
||||
```yaml
|
||||
kubectl exec --stdin --tty validator-engine-pod -- /bin/bash
|
||||
du -h .
|
||||
```
|
||||
### Deploy on-premises with metalLB load balancer
|
||||
|
||||
Often the Kubernetes cluster is located in a DMZ, behind a corporate firewall, and access is controlled via a proxy configuration.
|
||||
In this case we can't use the host's network stack (**hostNetwork: true**) within a Pod and must manually proxy access to the pod.
|
||||
|
||||
A **LoadBalancer** service type automatically provisions an external load balancer (such as those provided by cloud providers like AWS, GCP, Azure) and assigns a public IP address to your service. In a non-cloud environment or in a DMZ setup, you need to manually configure the load balancer.
|
||||
|
||||
If you are running your Kubernetes cluster on-premises or in an environment where an external load balancer is not automatically provided, you can use a load balancer implementation like MetalLB.
|
||||
|
||||
#### Prepare
|
||||
Select the node where persistent storage will be located for TON validator.
|
||||
* Add a label to this particular node. By this label our pod will know where to be deployed:
|
||||
```
|
||||
kubectl label nodes <NODE_NAME> node_type=ton-validator
|
||||
```
|
||||
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-metal-lb.yaml](ton-metal-lb.yaml).
|
||||
* Replace **<LOCAL_STORAGE_PATH>** with a real path on host for Persistent Volume.
|
||||
* If you change the ports, make sure you specify appropriate env vars in Pod section.
|
||||
* If you want to use dynamic storage provisioning via volumeClaimTemplates, feel free to create your own StorageClass.
|
||||
|
||||
* Install MetalLB
|
||||
```yaml
|
||||
kubectl apply -f https://raw.githubusercontent.com/metallb/metallb/v0.14.5/config/manifests/metallb-native.yaml
|
||||
```
|
||||
|
||||
* Configure MetalLB
|
||||
Create an IPAddressPool resource to define the IP address range that MetalLB can use for external load balancer services.
|
||||
```yaml
|
||||
apiVersion: metallb.io/v1beta1
|
||||
kind: IPAddressPool
|
||||
metadata:
|
||||
name: first-pool
|
||||
namespace: metallb-system
|
||||
spec:
|
||||
addresses:
|
||||
- 10.244.1.0/24 <-- your CIDR address
|
||||
```
|
||||
Apply the configuration:
|
||||
```yaml
|
||||
kubectl apply -f metallb-config.yaml
|
||||
```
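
If MetalLB runs in layer 2 mode (assumed here), the pool also has to be announced with an L2Advertisement, otherwise the assigned external IP will not be reachable. A minimal sketch (the resource name is arbitrary):
```
kubectl apply -f - <<EOF
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: first-pool-l2
  namespace: metallb-system
spec:
  ipAddressPools:
  - first-pool
EOF
```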
|
||||
#### Install
|
||||
|
||||
```yaml
|
||||
kubectl apply -f ton-metal-lb.yaml
|
||||
```
|
||||
We do not use Pod Node Affinity here, since the Pod will remember the host with local storage it was bound to.
|
||||
|
||||
#### Verify installation
|
||||
Assuming your network CIDR (**--pod-network-cidr**) within the cluster is 10.244.1.0/24, you can compare the output with the one below:
|
||||
```yaml
|
||||
kubectl get service
|
||||
|
||||
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
|
||||
kubernetes ClusterIP <NOT_IMPORTANT> <none> 443/TCP 28h
|
||||
validator-engine-srv LoadBalancer <NOT_IMPORTANT> 10.244.1.1 30001:30001/UDP,30002:30002/TCP,30003:30003/TCP 60m
|
||||
```
|
||||
You can see that the endpoints are pointing to the MetalLB subnet:
|
||||
```
|
||||
kubectl get endpoints
|
||||
|
||||
NAME ENDPOINTS
|
||||
kubernetes <IP>:6443
|
||||
validator-engine-srv 10.244.1.10:30002,10.244.1.10:30001,10.244.1.10:30003
|
||||
```
|
||||
And MetalLB itself operates with the right endpoint:
|
||||
```
|
||||
kubectl describe service metallb-webhook-service -n metallb-system
|
||||
|
||||
Name: metallb-webhook-service
|
||||
Namespace: metallb-system
|
||||
Selector: component=controller
|
||||
Type: ClusterIP
|
||||
IP: <NOT_IMPORTANT_IP>
|
||||
IPs: <NOT_IMPORTANT_IP>
|
||||
Port: <unset> 443/TCP
|
||||
TargetPort: 9443/TCP
|
||||
Endpoints: 10.244.2.3:9443 <-- CIDR
|
||||
```
|
||||
|
||||
Use the commands from the previous chapter to see if the node operates properly.
|
||||
|
||||
### Deploy on AWS cloud (Amazon Web Services)
|
||||
|
||||
#### Prepare
|
||||
* AWS EKS is configured with worker nodes with selected add-ons:
|
||||
* CoreDNS - Enable service discovery within your cluster.
|
||||
* kube-proxy - Enable service networking within your cluster.
|
||||
* Amazon VPC CNI - Enable pod networking within your cluster.
|
||||
* Allocate Elastic IP.
|
||||
* Replace **<PUBLIC_IP>** with the newly created Elastic IP in [ton-aws.yaml](ton-aws.yaml)
|
||||
* Replace **<ELASTIC_IP_ID>** with Elastic IP allocation ID (see in AWS console).
|
||||
* Adjust StorageClass name. Make sure you are providing fast storage.
|
||||
|
||||
#### Install
|
||||
|
||||
```kubectl apply -f ton-aws.yaml```
|
||||
|
||||
#### Verify installation
|
||||
Use instructions from the previous sections.
|
||||
|
||||
### Deploy on GCP (Google Cloud Platform)
|
||||
|
||||
#### Prepare
|
||||
* Kubernetes cluster of type Standard (not Autopilot).
|
||||
* Premium static IP address.
|
||||
* Adjust firewall rules and security groups to allow ports 30001/udp, 30002/tcp and 30003/tcp (default ones).
|
||||
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-gcp.yaml](ton-gcp.yaml).
|
||||
* Adjust StorageClass name. Make sure you are providing fast storage.
|
||||
|
||||
* Load Balancer will be created automatically according to Kubernetes service in yaml file.
|
||||
|
||||
#### Install
|
||||
```kubectl apply -f ton-gcp.yaml```
|
||||
|
||||
#### Verify installation
|
||||
Use instructions from the previous sections.
|
||||
|
||||
### Deploy on Ali Cloud
|
||||
|
||||
#### Prepare
|
||||
* AliCloud kubernetes cluster.
|
||||
* Elastic IP.
|
||||
* Replace **<ELASTIC_IP_ID>** with Elastic IP allocation ID (see in AliCloud console).
|
||||
* Replace **<PUBLIC_IP>** (and ports if needed) in file [ton-ali.yaml](ton-ali.yaml) with the elastic IP attached to your CLB.
|
||||
* Adjust StorageClass name. Make sure you are providing fast storage.
|
||||
|
||||
#### Install
|
||||
```kubectl apply -f ton-ali.yaml```
|
||||
|
||||
As a result, a CLB (classic internal load balancer) will be created automatically with the assigned external IP.
|
||||
|
||||
#### Verify installation
|
||||
Use instructions from the previous sections.
|
||||
|
||||
## Troubleshooting
|
||||
## Docker
|
||||
### TON node cannot synchronize and constantly shows [Error : 651 : no nodes] messages in the log
|
||||
|
||||
Start the new container without starting validator-engine:
|
||||
|
||||
```
|
||||
docker run -it -v /data/db:/var/ton-work/db \
|
||||
-e "HOST_IP=<PUBLIC_IP>" \
|
||||
-e "PUBLIC_IP=<PUBLIC_IP>" \
|
||||
-e "LITESERVER=true" \
|
||||
-p 43677:43677/udp \
|
||||
-p 43678:43678/tcp \
|
||||
-p 43679:43679/tcp \
|
||||
--entrypoint /bin/bash \
|
||||
ghcr.io/ton-blockchain/ton
|
||||
```
|
||||
identify your PUBLIC_IP:
|
||||
```
|
||||
curl -4 ifconfig.me
|
||||
```
|
||||
Check whether the resulting IP coincides with your <PUBLIC_IP>.
|
||||
If it doesn't, exit container and launch it with the correct public IP.
|
||||
Then, inside the container, open the UDP port you plan to allocate for the TON node using the netcat utility:
|
||||
```
|
||||
nc -ul 30001
|
||||
```
|
||||
Then, from any **other** Linux machine, check if you can reach this UDP port by sending a test message to it:
|
||||
```
|
||||
echo "test" | nc -u <PUBLIC_IP> 30001
|
||||
```
|
||||
As a result, you should receive the "test" message inside the container.
|
||||
|
||||
If you don't receive the message inside the Docker container, it means that your firewall, load balancer, NAT or proxy is blocking it.
|
||||
Ask your system administrator for assistance.
|
||||
|
||||
In the same way you can check if a TCP port is available:
|
||||
|
||||
Execute ```nc -l 30003``` inside the container and test the connection from another server:
|
||||
```nc -vz <PUBLIC_IP> 30003```
|
||||
|
||||
### Can't connect to lite-server
|
||||
* check if lite-server was enabled on start by passing **"LITESERVER=true"** argument;
|
||||
* check if TCP port (LITE_PORT) is available from the outside. From any other linux machine execute:
|
||||
```
|
||||
nc -vz <PUBLIC_IP> <LITE_PORT>
|
||||
```
|
||||
### How to see what traffic is generated inside the TON docker container?
|
||||
A traffic monitoring utility is available inside the container; just execute:
|
||||
```
|
||||
iptraf-ng
|
||||
```
|
||||
Other tools like **tcpdump**, **nc**, **wget**, **curl**, **ifconfig**, **pv**, **plzip**, **jq** and **netstat** are also available.
|
||||
|
||||
### How to build TON docker image from sources?
|
||||
```
|
||||
git clone --recursive https://github.com/ton-blockchain/ton.git
|
||||
cd ton
|
||||
docker build .
|
||||
```
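
To run the image you just built, it is convenient to tag it during the build. A small sketch (the tag `my-ton:local` is arbitrary; the run command mirrors the quick-start example above):
```
# Build with a tag so the image is easy to reference
docker build -t my-ton:local .

# Run it the same way as the official image
docker run -d --name ton-node -v /data/db:/var/ton-work/db \
  -e "PUBLIC_IP=<PUBLIC_IP>" \
  --network host \
  -it my-ton:local
```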
|
||||
|
||||
## Kubernetes
|
||||
### AWS
|
||||
#### After installing AWS LB, load balancer is still not available (pending):
|
||||
```
|
||||
kubectl get deployment -n kube-system aws-load-balancer-controller
|
||||
```
|
||||
Solution:
|
||||
|
||||
Try to install the AWS Load Balancer Controller using ```Helm```.
|
||||
|
||||
---
|
||||
|
||||
#### After installing AWS LB and running ton node, service shows error:
|
||||
|
||||
```k describe service validator-engine-srv```
|
||||
|
||||
```log
|
||||
Failed build model due to unable to resolve at least one subnet (0 match VPC and tags: [kubernetes.io/role/elb])
|
||||
```
|
||||
Solution:
|
||||
|
||||
You haven't labeled the AWS subnets with the correct resource tags.
|
||||
|
||||
* Public Subnets should be resource tagged with: kubernetes.io/role/elb: 1
|
||||
* Private Subnets should be tagged with: kubernetes.io/role/internal-elb: 1
|
||||
* Both private and public subnets should be tagged with: kubernetes.io/cluster/${your-cluster-name}: owned
|
||||
* or if the subnets are also used by non-EKS resources kubernetes.io/cluster/${your-cluster-name}: shared
|
||||
|
||||
So create tags for at least one subnet:
|
||||
```
|
||||
kubernetes.io/role/elb: 1
|
||||
kubernetes.io/cluster/<YOUR_CLUSTER_NAME>: owned
|
||||
```
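
One way to add these tags is with the AWS CLI (a sketch; the subnet ID and cluster name are placeholders):
```
aws ec2 create-tags \
  --resources subnet-0123456789abcdef0 \
  --tags Key=kubernetes.io/role/elb,Value=1 \
         Key=kubernetes.io/cluster/<YOUR_CLUSTER_NAME>,Value=owned
```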
|
||||
---
|
||||
#### AWS Load Balancer works, but I still see ```[no nodes]``` in validator's log
|
||||
It is required to add the security group for the EC2 instances to the load balancer along with the default security group.
|
||||
It is misleading to assume that the default security group has "everything open."
|
||||
|
||||
Add the security group (its default name is usually something like 'launch-wizard-1').
|
||||
Make sure you allow the ports you specified, or the default ports 30001/udp, 30002/tcp and 30003/tcp.
|
||||
|
||||
For testing purposes, you can also set the inbound and outbound rules of the new security group to allow ALL ports, ALL protocols and source CIDR 0.0.0.0/0.
|
||||
|
||||
---
|
||||
|
||||
#### Pending PersistentVolumeClaim ```Waiting for a volume to be created either by the external provisioner 'ebs.csi.aws.com' or manually by the system administrator.```
|
||||
|
||||
Solution:
|
||||
|
||||
Configure Amazon EBS CSI driver for working PersistentVolumes in EKS.
|
||||
|
||||
1. Enable IAM OIDC provider
|
||||
```
|
||||
eksctl utils associate-iam-oidc-provider --region=us-west-2 --cluster=k8s-my --approve
|
||||
```
|
||||
|
||||
2. Create Amazon EBS CSI driver IAM role
|
||||
```
|
||||
eksctl create iamserviceaccount \
|
||||
--region us-west-2 \
|
||||
--name ebs-csi-controller-sa \
|
||||
--namespace kube-system \
|
||||
--cluster k8s-my \
|
||||
--attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \
|
||||
--approve \
|
||||
--role-only \
|
||||
--role-name AmazonEKS_EBS_CSI_DriverRole
|
||||
```
|
||||
|
||||
3. Add the Amazon EBS CSI add-on
|
||||
```yaml
|
||||
eksctl create addon --name aws-ebs-csi-driver --cluster k8s-my --service-account-role-arn arn:aws:iam::$(aws sts get-caller-identity --query Account --output text):role/AmazonEKS_EBS_CSI_DriverRole --force
|
||||
```
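
After the add-on is installed, a quick sanity check is to verify that the EBS CSI pods are running:
```
kubectl get pods -n kube-system | grep ebs-csi
```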
|
||||
### Google Cloud
|
||||
#### Load Balancer cannot obtain external IP (pending)
|
||||
|
||||
```
|
||||
kubectl describe service validator-engine-srv
|
||||
|
||||
Events:
|
||||
Type Reason Age From Message
|
||||
---- ------ ---- ---- -------
|
||||
Warning LoadBalancerMixedProtocolNotSupported 7m8s g-cloudprovider LoadBalancers with multiple protocols are not supported.
|
||||
Normal EnsuringLoadBalancer 113s (x7 over 7m8s) service-controller Ensuring load balancer
|
||||
Warning SyncLoadBalancerFailed 113s (x7 over 7m8s) service-controller Error syncing load balancer: failed to ensure load balancer: mixed protocol is not supported for LoadBalancer
|
||||
```
|
||||
Solution:
|
||||
|
||||
Create a static IP address of type Premium in the GCP console and use it as the value of the ```loadBalancerIP``` field in the Kubernetes service.
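
If you prefer the CLI over the console, a Premium static address can be reserved roughly like this (a sketch; the address name and region are placeholders):
```
gcloud compute addresses create ton-node-ip --region <REGION> --network-tier PREMIUM

# Print the reserved address to put into loadBalancerIP
gcloud compute addresses describe ton-node-ip --region <REGION> --format='get(address)'
```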
|
||||
|
||||
### Ali Cloud
|
||||
|
||||
#### Validator logs always show
|
||||
```
|
||||
Client got error [PosixError : Connection reset by peer : 104 : Error on [fd:45]]
|
||||
[!NetworkManager][&ADNL_WARNING] [networkmanager]: received too small proxy packet of size 21
|
||||
```
|
||||
Solution:
|
||||
|
||||
The node is synchronizing, but very slowly.
|
||||
Try to use a Network Load Balancer (NLB) instead of the default CLB.
|
||||
|
||||
|
||||
If you don't need Liteserver, then remove -e "LITESERVER=true".
|
||||
|
||||
#### Use
|
||||
```docker exec -ti <container-id> /bin/bash```
|
||||
|
||||
```./validator-engine-console -k client -p server.pub -a <IP>:<TCP-PORT1>```
|
||||
|
||||
IP:PORT is shown at start of container.
|
||||
|
||||
#### Lite-client
|
||||
To use lite-client you need to get liteserver.pub from container.
|
||||
|
||||
```docker cp <container-id>:/var/ton-work/db/liteserver.pub /your/path```
|
||||
|
||||
Then you can connect to it, but be sure you use right port, it's different from fullnode console port.
|
||||
|
||||
```lite-client -a <IP>:<TCP-PORT2> -p liteserver.pub```
|
||||
|
|
|
@ -6,4 +6,4 @@
|
|||
"permissions" : 15
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
|
112
docker/init.sh
|
@ -1,30 +1,93 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
# global config
|
||||
if [ ! -z "$GCONFURL" ]; then
|
||||
if [ ! -z "$TEST" ]; then
|
||||
echo -e "Running simple validator-engine test..."
|
||||
validator-engine -h
|
||||
test $? -eq 2 || { echo "simple validator-engine test failed"; exit 1; }
|
||||
exit 0;
|
||||
fi
|
||||
|
||||
# global config
|
||||
if [ ! -z "$GLOBAL_CONFIG_URL" ]; then
|
||||
echo -e "\e[1;32m[+]\e[0m Downloading provided global config."
|
||||
wget -q $GCONFURL -O /var/ton-work/db/ton-global.config
|
||||
wget -q $GLOBAL_CONFIG_URL -O /var/ton-work/db/ton-global.config
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m No global config provided, downloading default."
|
||||
echo -e "\e[1;33m[=]\e[0m No global config provided, downloading mainnet default."
|
||||
wget -q https://api.tontech.io/ton/wallet-mainnet.autoconf.json -O /var/ton-work/db/ton-global.config
|
||||
fi
|
||||
|
||||
if [ -z "$VALIDATOR_PORT" ]; then
|
||||
VALIDATOR_PORT=30001
|
||||
echo -e "\e[1;33m[=]\e[0m Using default VALIDATOR_PORT $VALIDATOR_PORT udp"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using VALIDATOR_PORT $VALIDATOR_PORT udp"
|
||||
fi
|
||||
|
||||
# Init local config with IP:PORT
|
||||
if [ ! -z "$PUBLIC_IP" ]; then
|
||||
if [ -z "$CONSOLE_PORT" ]; then
|
||||
CONSOLE_PORT="43678"
|
||||
fi
|
||||
echo -e "\e[1;32m[+]\e[0m Using provided IP: $PUBLIC_IP:$CONSOLE_PORT"
|
||||
validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$CONSOLE_PORT"
|
||||
echo -e "\e[1;32m[+]\e[0m Using provided IP: $PUBLIC_IP:$VALIDATOR_PORT"
|
||||
else
|
||||
echo -e "\e[1;31m[!]\e[0m No IP:PORT provided, exiting"
|
||||
echo -e "\e[1;31m[!]\e[0m No PUBLIC_IP provided, exiting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "/var/ton-work/db/config.json" ]; then
|
||||
echo -e "\e[1;32m[+]\e[0m Initializing validator-engine:"
|
||||
echo validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$VALIDATOR_PORT"
|
||||
validator-engine -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --ip "$PUBLIC_IP:$VALIDATOR_PORT"
|
||||
test $? -eq 0 || { echo "Cannot initialize validator-engine"; exit 2; }
|
||||
fi
|
||||
|
||||
if [ ! -z "$DUMP_URL" ]; then
|
||||
echo -e "\e[1;32m[+]\e[0m Using provided dump $DUMP_URL"
|
||||
if [ ! -f "dump_downloaded" ]; then
|
||||
echo -e "\e[1;32m[+]\e[0m Downloading dump..."
|
||||
curl --retry 10 --retry-delay 30 -Ls $DUMP_URL | pv | plzip -d -n8 | tar -xC /var/ton-work/db
|
||||
touch dump_downloaded
|
||||
else
|
||||
echo -e "\e[1;32m[+]\e[0m Dump has been already used."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$STATE_TTL" ]; then
|
||||
STATE_TTL=86400
|
||||
echo -e "\e[1;33m[=]\e[0m Using default STATE_TTL $STATE_TTL"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using STATE_TTL $STATE_TTL"
|
||||
fi
|
||||
|
||||
if [ -z "$ARCHIVE_TTL" ]; then
|
||||
ARCHIVE_TTL=86400
|
||||
echo -e "\e[1;33m[=]\e[0m Using default ARCHIVE_TTL $ARCHIVE_TTL"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using ARCHIVE_TTL $ARCHIVE_TTL"
|
||||
fi
|
||||
|
||||
if [ -z "$THREADS" ]; then
|
||||
THREADS=8
|
||||
echo -e "\e[1;33m[=]\e[0m Using default THREADS $THREADS"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using THREADS $THREADS"
|
||||
fi
|
||||
|
||||
if [ -z "$VERBOSITY" ]; then
|
||||
VERBOSITY=3
|
||||
echo -e "\e[1;33m[=]\e[0m Using default VERBOSITY $VERBOSITY"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using VERBOSITY $VERBOSITY"
|
||||
fi
|
||||
|
||||
if [ -z "$CONSOLE_PORT" ]; then
|
||||
CONSOLE_PORT=30002
|
||||
echo -e "\e[1;33m[=]\e[0m Using default CONSOLE_PORT $CONSOLE_PORT tcp"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using CONSOLE_PORT $CONSOLE_PORT tcp"
|
||||
fi
|
||||
|
||||
# Generating server certificate
|
||||
if [ -f "./server" ]; then
|
||||
echo -e "\e[1;33m[=]\e[0m Found existing server certificate, skipping"
|
||||
else
|
||||
else
|
||||
echo -e "\e[1;32m[+]\e[0m Generating and installing server certificate for remote control"
|
||||
read -r SERVER_ID1 SERVER_ID2 <<< $(generate-random-id -m keys -n server)
|
||||
echo "Server IDs: $SERVER_ID1 $SERVER_ID2"
|
||||
|
@ -32,16 +95,16 @@ else
|
|||
fi
|
||||
|
||||
# Generating client certificate
|
||||
if [ -f "./client" ]; then
|
||||
if [ -f "./client" ]; then
|
||||
echo -e "\e[1;33m[=]\e[0m Found existing client certificate, skipping"
|
||||
else
|
||||
read -r CLIENT_ID1 CLIENT_ID2 <<< $(generate-random-id -m keys -n client)
|
||||
echo -e "\e[1;32m[+]\e[0m Generated client private certificate $CLIENT_ID1 $CLIENT_ID2"
|
||||
echo -e "\e[1;32m[+]\e[0m Generated client public certificate"
|
||||
# Adding client permissions
|
||||
sed -e "s/CONSOLE-PORT/\"$(printf "%q" $CONSOLE_PORT)\"/g" -e "s~SERVER-ID~\"$(printf "%q" $SERVER_ID2)\"~g" -e "s~CLIENT-ID~\"$(printf "%q" $CLIENT_ID2)\"~g" control.template > control.new
|
||||
sed -e "s~\"control\"\ \:\ \[~$(printf "%q" $(cat control.new))~g" config.json > config.json.new
|
||||
mv config.json.new config.json
|
||||
sed -e "s/CONSOLE-PORT/\"$(printf "%q" $CONSOLE_PORT)\"/g" -e "s~SERVER-ID~\"$(printf "%q" $SERVER_ID2)\"~g" -e "s~CLIENT-ID~\"$(printf "%q" $CLIENT_ID2)\"~g" /var/ton-work/scripts/control.template > control.new
|
||||
sed -e "s~\"control\"\ \:\ \[~$(printf "%q" $(cat control.new))~g" /var/ton-work/db/config.json > config.json.new
|
||||
mv config.json.new /var/ton-work/db/config.json
|
||||
fi
|
||||
|
||||
# Liteserver
|
||||
|
@ -50,20 +113,25 @@ if [ -z "$LITESERVER" ]; then
|
|||
else
|
||||
if [ -f "./liteserver" ]; then
|
||||
echo -e "\e[1;33m[=]\e[0m Found existing liteserver certificate, skipping"
|
||||
else
|
||||
else
|
||||
echo -e "\e[1;32m[+]\e[0m Generating and installing liteserver certificate for remote control"
|
||||
read -r LITESERVER_ID1 LITESERVER_ID2 <<< $(generate-random-id -m keys -n liteserver)
|
||||
echo "Liteserver IDs: $LITESERVER_ID1 $LITESERVER_ID2"
|
||||
cp liteserver /var/ton-work/db/keyring/$LITESERVER_ID1
|
||||
|
||||
if [ -z "$LITE_PORT" ]; then
|
||||
LITE_PORT="43679"
|
||||
LITE_PORT=30003
|
||||
echo -e "\e[1;33m[=]\e[0m Using default LITE_PORT $LITE_PORT tcp"
|
||||
else
|
||||
echo -e "\e[1;33m[=]\e[0m Using LITE_PORT $LITE_PORT tcp"
|
||||
fi
|
||||
|
||||
LITESERVERS=$(printf "%q" "\"liteservers\":[{\"id\":\"$LITESERVER_ID2\",\"port\":\"$LITE_PORT\"}")
|
||||
sed -e "s~\"liteservers\"\ \:\ \[~$LITESERVERS~g" config.json > config.json.liteservers
|
||||
mv config.json.liteservers config.json
|
||||
sed -e "s~\"liteservers\"\ \:\ \[~$LITESERVERS~g" /var/ton-work/db/config.json > config.json.liteservers
|
||||
mv config.json.liteservers /var/ton-work/db/config.json
|
||||
fi
|
||||
fi
|
||||
|
||||
echo -e "\e[1;32m[+]\e[0m Running validator-engine"
|
||||
|
||||
exec validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db
|
||||
echo -e "\e[1;32m[+]\e[0m Starting validator-engine:"
|
||||
echo validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --state-ttl $STATE_TTL --archive-ttl $ARCHIVE_TTL --threads $THREADS --verbosity $VERBOSITY $CUSTOM_ARG
|
||||
exec validator-engine -c /var/ton-work/db/config.json -C /var/ton-work/db/ton-global.config --db /var/ton-work/db --state-ttl $STATE_TTL --archive-ttl $ARCHIVE_TTL --threads $THREADS --verbosity $VERBOSITY $CUSTOM_ARG
|
||||
|
|
121
docker/ton-ali.yaml
Normal file
|
@ -0,0 +1,121 @@
|
|||
apiVersion: "apps/v1"
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: validator-engine-pod
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: validator-engine-pvc
|
||||
spec:
|
||||
storageClassName: alicloud-disk-ssd
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 800Gi
|
||||
serviceName: validator-engine-srv-headless
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: validator-engine-pod
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: validator-engine-container
|
||||
image: ghcr.io/neodix42/ton:latest
|
||||
env:
|
||||
- name: PUBLIC_IP
|
||||
value: "<PUBLIC_IP>"
|
||||
- name: GLOBAL_CONFIG_URL
|
||||
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||
- name: DUMP_URL
|
||||
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||
- name: LITESERVER
|
||||
value: "true"
|
||||
- name: VALIDATOR_PORT
|
||||
value: "30001"
|
||||
- name: CONSOLE_PORT
|
||||
value: "30002"
|
||||
- name: LITE_PORT
|
||||
value: "30003"
|
||||
- name: STATE_TTL
|
||||
value: "86400"
|
||||
- name: ARCHIVE_TTL
|
||||
value: "86400"
|
||||
- name: THREADS
|
||||
value: "8"
|
||||
- name: VERBOSITY
|
||||
value: "3"
|
||||
ports:
|
||||
- containerPort: 30001
|
||||
protocol: UDP
|
||||
- containerPort: 30002
|
||||
protocol: TCP
|
||||
- containerPort: 30003
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: "/var/ton-work/db"
|
||||
name: validator-engine-pvc
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Gi"
|
||||
cpu: "16"
|
||||
limits:
|
||||
memory: "128Gi"
|
||||
cpu: "32"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: validator-engine-srv
|
||||
annotations:
|
||||
service.beta.kubernetes.io/alibaba-cloud-loadbalancer-eip-ids: "<ELASTIC_IP_ID>"
|
||||
service.beta.kubernetes.io/alibaba-cloud-loadbalancer-address-type: "intranet"
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
externalTrafficPolicy: Local
|
||||
ports:
|
||||
- name: validator-udp
|
||||
nodePort: 30001
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: console-tcp
|
||||
nodePort: 30002
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: ls-tcp
|
||||
nodePort: 30003
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: validator-engine-srv-headless
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: validator-udp
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: console-tcp
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: ls-tcp
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
122
docker/ton-aws.yaml
Normal file
|
@ -0,0 +1,122 @@
|
|||
apiVersion: "apps/v1"
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: validator-engine-pod
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: validator-engine-pvc
|
||||
spec:
|
||||
storageClassName: gp2
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 800Gi
|
||||
serviceName: validator-engine-srv-headless
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: validator-engine-pod
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: validator-engine-container
|
||||
image: ghcr.io/neodix42/ton:latest
|
||||
env:
|
||||
- name: PUBLIC_IP
|
||||
value: "<PUBLIC_IP>"
|
||||
- name: GLOBAL_CONFIG_URL
|
||||
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||
- name: DUMP_URL
|
||||
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||
- name: LITESERVER
|
||||
value: "true"
|
||||
- name: VALIDATOR_PORT
|
||||
value: "30001"
|
||||
- name: CONSOLE_PORT
|
||||
value: "30002"
|
||||
- name: LITE_PORT
|
||||
value: "30003"
|
||||
- name: STATE_TTL
|
||||
value: "86400"
|
||||
- name: ARCHIVE_TTL
|
||||
value: "86400"
|
||||
- name: THREADS
|
||||
value: "8"
|
||||
- name: VERBOSITY
|
||||
value: "3"
|
||||
ports:
|
||||
- containerPort: 30001
|
||||
protocol: UDP
|
||||
- containerPort: 30002
|
||||
protocol: TCP
|
||||
- containerPort: 30003
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: "/var/ton-work/db"
|
||||
name: validator-engine-pvc
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Gi"
|
||||
cpu: "16"
|
||||
limits:
|
||||
memory: "128Gi"
|
||||
cpu: "32"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: validator-engine-srv
|
||||
annotations:
|
||||
service.beta.kubernetes.io/aws-load-balancer-type: external
|
||||
service.beta.kubernetes.io/aws-load-balancer-nlb-target-type: ip
|
||||
service.beta.kubernetes.io/aws-load-balancer-scheme: internet-facing
|
||||
service.beta.kubernetes.io/aws-load-balancer-eip-allocations: "<ELASTIC_IP_ID>" # Replace with your EIP allocation ID
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- name: validator-udp
|
||||
nodePort: 30001
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: console-tcp
|
||||
nodePort: 30002
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: ls-tcp
|
||||
nodePort: 30003
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: validator-engine-srv-headless
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: validator-udp
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: console-tcp
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: ls-tcp
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
134
docker/ton-gcp.yaml
Normal file
|
@ -0,0 +1,134 @@
|
|||
apiVersion: "apps/v1"
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: validator-engine-pod
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: validator-engine-pvc
|
||||
spec:
|
||||
storageClassName: standard-rwo
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 800Gi
|
||||
serviceName: validator-engine-srv-headless
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
name: validator-engine-pod
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
containers:
|
||||
- name: validator-engine-container
|
||||
image: ghcr.io/neodix42/ton:latest
|
||||
env:
|
||||
- name: PUBLIC_IP
|
||||
value: "<PUBLIC_IP>"
|
||||
- name: GLOBAL_CONFIG_URL
|
||||
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||
- name: DUMP_URL
|
||||
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||
- name: LITESERVER
|
||||
value: "true"
|
||||
- name: VALIDATOR_PORT
|
||||
value: "30001"
|
||||
- name: CONSOLE_PORT
|
||||
value: "30002"
|
||||
- name: LITE_PORT
|
||||
value: "30003"
|
||||
- name: STATE_TTL
|
||||
value: "86400"
|
||||
- name: ARCHIVE_TTL
|
||||
value: "86400"
|
||||
- name: THREADS
|
||||
value: "8"
|
||||
- name: VERBOSITY
|
||||
value: "3"
|
||||
ports:
|
||||
- containerPort: 30001
|
||||
protocol: UDP
|
||||
- containerPort: 30002
|
||||
protocol: TCP
|
||||
- containerPort: 30003
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- mountPath: "/var/ton-work/db"
|
||||
name: validator-engine-pvc
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Gi"
|
||||
cpu: "16"
|
||||
limits:
|
||||
memory: "128Gi"
|
||||
cpu: "32"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: validator-engine-srv
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: <PUBLIC_IP>
|
||||
ports:
|
||||
- port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: validator-engine-console-srv
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: <PUBLIC_IP>
|
||||
ports:
|
||||
- port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: lite-server-srv
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
loadBalancerIP: <PUBLIC_IP>
|
||||
ports:
|
||||
- port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: validator-engine-srv-headless
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: validator-udp
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: console-tcp
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: ls-tcp
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
118
docker/ton-metal-lb.yaml
Normal file
|
@ -0,0 +1,118 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: validator-engine-pod
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
volumes:
|
||||
- name: validator-engine-pv
|
||||
persistentVolumeClaim:
|
||||
claimName: validator-engine-pvc
|
||||
containers:
|
||||
- name: validator-engine-container
|
||||
image: ghcr.io/neodix42/ton:latest
|
||||
env:
|
||||
- name: PUBLIC_IP
|
||||
value: "<PUBLIC_IP>"
|
||||
- name: GLOBAL_CONFIG_URL
|
||||
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||
- name: DUMP_URL
|
||||
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||
- name: LITESERVER
|
||||
value: "true"
|
||||
- name: VALIDATOR_PORT
|
||||
value: "30001"
|
||||
- name: CONSOLE_PORT
|
||||
value: "30002"
|
||||
- name: LITE_PORT
|
||||
value: "30003"
|
||||
- name: STATE_TTL
|
||||
value: "86400"
|
||||
- name: ARCHIVE_TTL
|
||||
value: "86400"
|
||||
- name: THREADS
|
||||
value: "8"
|
||||
- name: VERBOSITY
|
||||
value: "3"
|
||||
volumeMounts:
|
||||
- mountPath: "/var/ton-work/db"
|
||||
name: validator-engine-pv
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Gi"
|
||||
cpu: "16"
|
||||
limits:
|
||||
memory: "128Gi"
|
||||
cpu: "32"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: validator-engine-srv
|
||||
annotations:
|
||||
metallb.universe.tf/address-pool: first-pool
|
||||
spec:
|
||||
type: LoadBalancer
|
||||
ports:
|
||||
- name: validator-engine-public-udp-port
|
||||
nodePort: 30001
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: validator-console-tcp-port
|
||||
nodePort: 30002
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: lite-server-tcp-port
|
||||
nodePort: 30003
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local-storage
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: validator-engine-pv
|
||||
labels:
|
||||
type: local
|
||||
spec:
|
||||
storageClassName: local-storage
|
||||
capacity:
|
||||
storage: 800Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
- ReadOnlyMany
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
local:
|
||||
path: <LOCAL_STORAGE_PATH>
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: node_type
|
||||
operator: In
|
||||
values:
|
||||
- ton-validator
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: validator-engine-pvc
|
||||
spec:
|
||||
storageClassName: local-storage
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 800Gi
|
126
docker/ton-node-port.yaml
Normal file
|
@ -0,0 +1,126 @@
|
|||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: validator-engine-pod
|
||||
labels:
|
||||
name: validator-engine-pod
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: node_type
|
||||
operator: In
|
||||
values:
|
||||
- ton-validator
|
||||
hostNetwork: true
|
||||
volumes:
|
||||
- name: validator-engine-pv
|
||||
persistentVolumeClaim:
|
||||
claimName: validator-engine-pvc
|
||||
containers:
|
||||
- name: validator-engine-container
|
||||
image: ghcr.io/neodix42/ton:latest
|
||||
env:
|
||||
- name: PUBLIC_IP
|
||||
value: "<PUBLIC_IP>"
|
||||
- name: GLOBAL_CONFIG_URL
|
||||
value: "https://api.tontech.io/ton/wallet-mainnet.autoconf.json"
|
||||
- name: DUMP_URL
|
||||
value: "https://dump.ton.org/dumps/latest.tar.lz"
|
||||
- name: LITESERVER
|
||||
value: "true"
|
||||
- name: VALIDATOR_PORT
|
||||
value: "30001"
|
||||
- name: CONSOLE_PORT
|
||||
value: "30002"
|
||||
- name: LITE_PORT
|
||||
value: "30003"
|
||||
- name: STATE_TTL
|
||||
value: "86400"
|
||||
- name: ARCHIVE_TTL
|
||||
value: "86400"
|
||||
- name: THREADS
|
||||
value: "8"
|
||||
- name: VERBOSITY
|
||||
value: "3"
|
||||
volumeMounts:
|
||||
- mountPath: "/var/ton-work/db"
|
||||
name: validator-engine-pv
|
||||
resources:
|
||||
requests:
|
||||
memory: "64Gi"
|
||||
cpu: "16"
|
||||
limits:
|
||||
memory: "128Gi"
|
||||
cpu: "32"
|
||||
---
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: validator-engine-srv
|
||||
spec:
|
||||
type: NodePort
|
||||
ports:
|
||||
- name: validator-engine-public-udp-port
|
||||
nodePort: 30001
|
||||
port: 30001
|
||||
targetPort: 30001
|
||||
protocol: UDP
|
||||
- name: validator-console-tcp-port
|
||||
nodePort: 30002
|
||||
port: 30002
|
||||
targetPort: 30002
|
||||
protocol: TCP
|
||||
- name: lite-server-tcp-port
|
||||
nodePort: 30003
|
||||
port: 30003
|
||||
targetPort: 30003
|
||||
protocol: TCP
|
||||
selector:
|
||||
name: validator-engine-pod
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: local-storage
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: WaitForFirstConsumer
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: validator-engine-pv
|
||||
labels:
|
||||
type: local
|
||||
spec:
|
||||
storageClassName: local-storage
|
||||
capacity:
|
||||
storage: 800Gi
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
- ReadOnlyMany
|
||||
persistentVolumeReclaimPolicy: Retain
|
||||
local:
|
||||
path: <LOCAL_STORAGE_PATH>
|
||||
nodeAffinity:
|
||||
required:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: node_type
|
||||
operator: In
|
||||
values:
|
||||
- ton-validator
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: validator-engine-pvc
|
||||
spec:
|
||||
storageClassName: local-storage
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
resources:
|
||||
requests:
|
||||
storage: 800Gi
|
|
@@ -35,7 +35,7 @@ else()
  add_library(emulator STATIC ${EMULATOR_SOURCE} ${EMULATOR_HEADERS})
endif()

target_link_libraries(emulator PUBLIC emulator_static)
target_link_libraries(emulator PUBLIC emulator_static git)
generate_export_header(emulator EXPORT_FILE_NAME ${CMAKE_CURRENT_BINARY_DIR}/emulator_export.h)
target_include_directories(emulator PUBLIC
  $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>

@@ -48,7 +48,7 @@ if (USE_EMSCRIPTEN)
  add_executable(emulator-emscripten ${EMULATOR_EMSCRIPTEN_SOURCE})
  target_link_libraries(emulator-emscripten PUBLIC emulator)
  target_link_options(emulator-emscripten PRIVATE -sEXPORTED_RUNTIME_METHODS=_malloc,free,UTF8ToString,stringToUTF8,allocate,ALLOC_NORMAL,lengthBytesUTF8)
  target_link_options(emulator-emscripten PRIVATE -sEXPORTED_FUNCTIONS=_emulate,_free,_run_get_method,_create_emulator,_destroy_emulator,_emulate_with_emulator)
  target_link_options(emulator-emscripten PRIVATE -sEXPORTED_FUNCTIONS=_emulate,_free,_run_get_method,_create_emulator,_destroy_emulator,_emulate_with_emulator,_version)
  target_link_options(emulator-emscripten PRIVATE -sEXPORT_NAME=EmulatorModule)
  target_link_options(emulator-emscripten PRIVATE -sERROR_ON_UNDEFINED_SYMBOLS=0)
  target_link_options(emulator-emscripten PRIVATE -Oz)

@@ -254,4 +254,8 @@ const char *run_get_method(const char *params, const char* stack, const char* co
  return output;
}

const char *version() {
  return emulator_version();
}

}

@@ -9,6 +9,7 @@
#include "tvm-emulator.hpp"
#include "crypto/vm/stack.hpp"
#include "crypto/vm/memo.h"
#include "git.h"

td::Result<td::Ref<vm::Cell>> boc_b64_to_cell(const char *boc) {
  TRY_RESULT_PREFIX(boc_decoded, td::base64_decode(td::Slice(boc)), "Can't decode base64 boc: ");

@@ -65,7 +66,18 @@ const char *external_not_accepted_response(std::string&& vm_log, int vm_exit_cod

td::Result<block::Config> decode_config(const char* config_boc) {
  TRY_RESULT_PREFIX(config_params_cell, boc_b64_to_cell(config_boc), "Can't deserialize config params boc: ");
  auto global_config = block::Config(config_params_cell, td::Bits256::zero(), block::Config::needWorkchainInfo | block::Config::needSpecialSmc | block::Config::needCapabilities);
  auto config_dict = std::make_unique<vm::Dictionary>(config_params_cell, 32);
  auto config_addr_cell = config_dict->lookup_ref(td::BitArray<32>::zero());
  if (config_addr_cell.is_null()) {
    return td::Status::Error("Can't find config address (param 0) is missing in config params");
  }
  auto config_addr_cs = vm::load_cell_slice(std::move(config_addr_cell));
  if (config_addr_cs.size() != 0x100) {
    return td::Status::Error(PSLICE() << "configuration parameter 0 with config address has wrong size");
  }
  ton::StdSmcAddress config_addr;
  config_addr_cs.fetch_bits_to(config_addr);
  auto global_config = block::Config(config_params_cell, std::move(config_addr), block::Config::needWorkchainInfo | block::Config::needSpecialSmc | block::Config::needCapabilities);
  TRY_STATUS_PREFIX(global_config.unpack(), "Can't unpack config params: ");
  return global_config;
}

@@ -76,8 +88,17 @@ void *transaction_emulator_create(const char *config_params_boc, int vm_log_verb
    LOG(ERROR) << global_config_res.move_as_error().message();
    return nullptr;
  }
  auto global_config = std::make_shared<block::Config>(global_config_res.move_as_ok());
  return new emulator::TransactionEmulator(std::move(global_config), vm_log_verbosity);
}

  return new emulator::TransactionEmulator(global_config_res.move_as_ok(), vm_log_verbosity);
void *emulator_config_create(const char *config_params_boc) {
  auto config = decode_config(config_params_boc);
  if (config.is_error()) {
    LOG(ERROR) << "Error decoding config: " << config.move_as_error();
    return nullptr;
  }
  return new block::Config(config.move_as_ok());
}

const char *transaction_emulator_emulate_transaction(void *transaction_emulator, const char *shard_account_boc, const char *message_boc) {

@@ -319,7 +340,21 @@ bool transaction_emulator_set_config(void *transaction_emulator, const char* con
    return false;
  }

  emulator->set_config(global_config_res.move_as_ok());
  emulator->set_config(std::make_shared<block::Config>(global_config_res.move_as_ok()));

  return true;
}

void config_deleter(block::Config* ptr) {
  // We do not delete the config object, since ownership management is delegated to the caller
}

bool transaction_emulator_set_config_object(void *transaction_emulator, void* config) {
  auto emulator = static_cast<emulator::TransactionEmulator *>(transaction_emulator);

  std::shared_ptr<block::Config> config_ptr(static_cast<block::Config *>(config), config_deleter);

  emulator->set_config(config_ptr);

  return true;
}

@@ -461,6 +496,13 @@ bool tvm_emulator_set_c7(void *tvm_emulator, const char *address, uint32_t unixt
  return true;
}

bool tvm_emulator_set_config_object(void* tvm_emulator, void* config) {
  auto emulator = static_cast<emulator::TvmEmulator *>(tvm_emulator);
  auto global_config = std::shared_ptr<block::Config>(static_cast<block::Config *>(config), config_deleter);
  emulator->set_config(global_config);
  return true;
}

bool tvm_emulator_set_prev_blocks_info(void *tvm_emulator, const char* info_boc) {
  auto emulator = static_cast<emulator::TvmEmulator *>(tvm_emulator);

@@ -672,3 +714,16 @@ const char *tvm_emulator_send_internal_message(void *tvm_emulator, const char *m
void tvm_emulator_destroy(void *tvm_emulator) {
  delete static_cast<emulator::TvmEmulator *>(tvm_emulator);
}

void emulator_config_destroy(void *config) {
  delete static_cast<block::Config *>(config);
}

const char* emulator_version() {
  auto version_json = td::JsonBuilder();
  auto obj = version_json.enter_object();
  obj("emulatorLibCommitHash", GitMetadata::CommitSHA1());
  obj("emulatorLibCommitDate", GitMetadata::CommitDate());
  obj.leave();
  return strdup(version_json.string_builder().as_cslice().c_str());
}

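Note on ownership of the string returned above: emulator_version() builds the JSON object and hands it out through strdup(), so the buffer comes from malloc and the caller is expected to release it with free(). A minimal consumer sketch (illustrative only; the field names emulatorLibCommitHash and emulatorLibCommitDate are the ones written by the function above, everything else is an assumption):

#include <cstdio>
#include <cstdlib>

extern "C" const char *emulator_version();

int main() {
  // Prints something like {"emulatorLibCommitHash":"...","emulatorLibCommitDate":"..."}
  const char *ver = emulator_version();
  std::printf("emulator version info: %s\n", ver);
  std::free(const_cast<char *>(ver));  // allocated via strdup() inside the library
  return 0;
}
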
@@ -16,6 +16,13 @@ extern "C" {
 */
EMULATOR_EXPORT void *transaction_emulator_create(const char *config_params_boc, int vm_log_verbosity);

/**
 * @brief Creates Config object from base64 encoded BoC
 * @param config_params_boc Base64 encoded BoC serialized Config dictionary (Hashmap 32 ^Cell)
 * @return Pointer to Config object or nullptr in case of error
 */
EMULATOR_EXPORT void *emulator_config_create(const char *config_params_boc);

/**
 * @brief Set unixtime for emulation
 * @param transaction_emulator Pointer to TransactionEmulator object

@@ -49,7 +56,7 @@ EMULATOR_EXPORT bool transaction_emulator_set_rand_seed(void *transaction_emulat
EMULATOR_EXPORT bool transaction_emulator_set_ignore_chksig(void *transaction_emulator, bool ignore_chksig);

/**
 * @brief Set unixtime for emulation
 * @brief Set config for emulation
 * @param transaction_emulator Pointer to TransactionEmulator object
 * @param config_boc Base64 encoded BoC serialized Config dictionary (Hashmap 32 ^Cell)
 * @return true in case of success, false in case of error

@@ -57,7 +64,15 @@ EMULATOR_EXPORT bool transaction_emulator_set_ignore_chksig(void *transaction_em
EMULATOR_EXPORT bool transaction_emulator_set_config(void *transaction_emulator, const char* config_boc);

/**
 * @brief Set unixtime for emulation
 * @brief Set config for emulation
 * @param transaction_emulator Pointer to TransactionEmulator object
 * @param config Pointer to Config object
 * @return true in case of success, false in case of error
 */
EMULATOR_EXPORT bool transaction_emulator_set_config_object(void *transaction_emulator, void* config);

/**
 * @brief Set libraries for emulation
 * @param transaction_emulator Pointer to TransactionEmulator object
 * @param libs_boc Base64 encoded BoC serialized shared libraries dictionary (HashmapE 256 ^Cell).
 * @return true in case of success, false in case of error

@@ -167,6 +182,14 @@ EMULATOR_EXPORT bool tvm_emulator_set_libraries(void *tvm_emulator, const char *
 */
EMULATOR_EXPORT bool tvm_emulator_set_c7(void *tvm_emulator, const char *address, uint32_t unixtime, uint64_t balance, const char *rand_seed_hex, const char *config);

/**
 * @brief Set config for TVM emulator
 * @param tvm_emulator Pointer to TVM emulator
 * @param config Pointer to Config object
 * @return true in case of success, false in case of error
 */
EMULATOR_EXPORT bool tvm_emulator_set_config_object(void* tvm_emulator, void* config);

/**
 * @brief Set tuple of previous blocks (13th element of c7)
 * @param tvm_emulator Pointer to TVM emulator

@@ -278,6 +301,17 @@ EMULATOR_EXPORT const char *tvm_emulator_send_internal_message(void *tvm_emulato
 */
EMULATOR_EXPORT void tvm_emulator_destroy(void *tvm_emulator);

/**
 * @brief Destroy Config object
 * @param tvm_emulator Pointer to Config object
 */
EMULATOR_EXPORT void emulator_config_destroy(void *config);

/**
 * @brief Get git commit hash and date of the library
 */
EMULATOR_EXPORT const char* emulator_version();

#ifdef __cplusplus
} // extern "C"
#endif

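Taken together, the declarations above form a small lifecycle for the new config object: decode the config dictionary once with emulator_config_create, attach the resulting pointer to any number of emulators through the *_set_config_object setters (which wrap it in a non-owning shared_ptr, see config_deleter above), and release it with emulator_config_destroy once no emulator uses it any more. A hedged usage sketch; config_boc, code_boc and data_boc are assumed to be valid base64-encoded BoC strings supplied by the caller:

#include "emulator/emulator-extern.h"

void emulate_with_shared_config(const char *config_boc, const char *code_boc, const char *data_boc) {
  // Decode and unpack the config dictionary once.
  void *config = emulator_config_create(config_boc);
  if (config == nullptr) {
    return;  // decoding failed; the library already logged the error
  }

  void *tx_emulator = transaction_emulator_create(config_boc, /*vm_log_verbosity=*/0);
  void *tvm_emulator = tvm_emulator_create(code_boc, data_boc, /*vm_log_verbosity=*/0);

  // Reuse the same decoded config for both emulator kinds.
  transaction_emulator_set_config_object(tx_emulator, config);
  tvm_emulator_set_config_object(tvm_emulator, config);

  // ... run emulations ...

  // The emulators hold the config through a non-owning shared_ptr,
  // so the caller destroys it last.
  tvm_emulator_destroy(tvm_emulator);
  transaction_emulator_destroy(tx_emulator);
  emulator_config_destroy(config);
}
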
@@ -4,6 +4,7 @@ _transaction_emulator_set_lt
_transaction_emulator_set_rand_seed
_transaction_emulator_set_ignore_chksig
_transaction_emulator_set_config
_transaction_emulator_set_config_object
_transaction_emulator_set_libs
_transaction_emulator_set_debug_enabled
_transaction_emulator_set_prev_blocks_info

@@ -11,9 +12,12 @@ _transaction_emulator_emulate_transaction
_transaction_emulator_emulate_tick_tock_transaction
_transaction_emulator_destroy
_emulator_set_verbosity_level
_emulator_config_create
_emulator_config_destroy
_tvm_emulator_create
_tvm_emulator_set_libraries
_tvm_emulator_set_c7
_tvm_emulator_set_config_object
_tvm_emulator_set_prev_blocks_info
_tvm_emulator_set_gas_limit
_tvm_emulator_set_debug_enabled

@@ -22,3 +26,4 @@ _tvm_emulator_send_external_message
_tvm_emulator_send_internal_message
_tvm_emulator_destroy
_tvm_emulator_emulate_run_method
_emulator_version

402
emulator/test/emulator-tests.cpp
Normal file
|
@ -0,0 +1,402 @@
|
|||
#include "td/utils/tests.h"
|
||||
|
||||
#include "block/block-auto.h"
|
||||
#include "block/block.h"
|
||||
#include "block/block-parse.h"
|
||||
|
||||
#include "crypto/vm/boc.h"
|
||||
|
||||
#include "td/utils/base64.h"
|
||||
#include "td/utils/crypto.h"
|
||||
#include "td/utils/JsonBuilder.h"
|
||||
|
||||
#include "smc-envelope/WalletV3.h"
|
||||
|
||||
#include "emulator/emulator-extern.h"
|
||||
|
||||
// testnet config as of 27.06.24
|
||||
const char *config_boc = "te6cckICAl8AAQAANecAAAIBIAABAAICAtgAAwAEAgL1AA0ADgIBIAAFAAYCAUgCPgI/AgEgAAcACAIBSAAJAAoCASAAHgAfAgEgAGUAZgIBSAALAAwCAWoA0gDTAQFI"
|
||||
"AJIBAUgAsgEDpDMADwIBbgAQABEAQDPAueB1cC0DTaIjG28I/scJsoxoIScEE9LNtuiQoYa2AgOuIAASABMBA7LwABoBASAAFAEBIAAYAQHAABUCAWoAFgAXAIm/VzGV"
|
||||
"o387z8N7BhdH91LBHMMhBLu7nv21jwo9wtTSXQIBABvI0aFLnw2QbZgjMPCLRdtRHxhUyinQudg6sdiohIwgwCAAQ79oJ47o6vzJDO5wV60LQESEyBcI3zuSSKtFQIlz"
|
||||
"hk86tAMBg+mbgbrrZVY0qEWL8HxF+gYzy9t5jLO50+QkJ2DWbWFHj0Qaw5TPlNDYOnY0A2VNeAnS9bZ98W8X7FTvgVqStlmABAAZAIOgCYiOTH0TnIIa0oSKjkT3CsgH"
|
||||
"NUU1Iy/5E472ortANeCAAAAAAAAAAAAAAAAROiXXYZuWf8AAi5Oy+xV/i+2JL9ABA6BgABsCASAAHAAdAFur4AAAAAAHGv1JjQAAEeDul1fav9HZ8+939/IsLGZ46E5h"
|
||||
"3qjR13yIrB8mcfbBAFur/////8AHGv1JjQAAEeDul1fav9HZ8+939/IsLGZ46E5h3qjR13yIrB8mcfbBAgEgACAAIQIBIAAzADQCASAAIgAjAgEgACkAKgIBIAAkACUB"
|
||||
"AUgAKAEBIAAmAQEgACcAQFVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVAEAzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMwBAAQEBAQEBAQEBAQEB"
|
||||
"AQEBAQEBAQEBAQEBAQEBAQEBAQECASAAKwAsAQFYAC8BASAALQEBIAAuAEDv5x0Thgr6pq6ur2NvkWhIf4DxAxsL+Nk5rknT6n99oABTAf//////////////////////"
|
||||
"////////////////////gAAAAIAAAAFAAQHAADACASAAMQAyABW+AAADvLNnDcFVUAAVv////7y9GpSiABACASAANQA2AgEgADcAOAIBIABCAEMCASAATgBPAgEgADkA"
|
||||
"OgIBIAA+AD8BASAAOwEBIAA9AQHAADwAt9BTLudOzwABAnAAKtiftocOhhpk4QsHt8jHSWwV/O7nxvFyZKUf75zoqiN3Bfb/JZk7D9mvTw7EDHU5BlaNBz2ml2s54kRz"
|
||||
"l0iBoQAAAAAP////+AAAAAAAAAAEABMaQ7msoAEBIB9IAQEgAEABASAAQQAUa0ZVPxAEO5rKAAAgAAAcIAAACWAAAAC0AAADhAEBIABEAQEgAEUAGsQAAAAGAAAAAAAA"
|
||||
"AC4CA81AAEYARwIBIABVAEgAA6igAgEgAEkASgIBIABLAEwCASAATQBdAgEgAFsAXgIBIABbAFsCAUgAYQBhAQEgAFABASAAYgIBIABRAFICAtkAUwBUAgm3///wYABf"
|
||||
"AGACASAAVQBWAgFiAFwAXQIBIABgAFcCAc4AYQBhAgEgAFgAWQIBIABaAF4CASAAXgBbAAFYAgEgAGEAYQIBIABeAF4AAdQAAUgAAfwCAdQAYQBhAAEgAgKRAGMAZAAq"
|
||||
"NgIGAgUAD0JAAJiWgAAAAAEAAAH0ACo2BAcDBQBMS0ABMS0AAAAAAgAAA+gCASAAZwBoAgEgAHoAewIBIABpAGoCASAAcABxAgEgAGsAbAEBSABvAQEgAG0BASAAbgAM"
|
||||
"AB4AHgADADFgkYTnKgAHEcN5N+CAAGteYg9IAAAB4AAIAE3QZgAAAAAAAAAAAAAAAIAAAAAAAAD6AAAAAAAAAfQAAAAAAAPQkEACASAAcgBzAgEgAHYAdwEBIAB0AQEg"
|
||||
"AHUAlNEAAAAAAAAAZAAAAAAAD0JA3gAAAAAnEAAAAAAAAAAPQkAAAAAAAhYOwAAAAAAAACcQAAAAAAAmJaAAAAAABfXhAAAAAAA7msoAAJTRAAAAAAAAAGQAAAAAAACc"
|
||||
"QN4AAAAAAZAAAAAAAAAAD0JAAAAAAAAPQkAAAAAAAAAnEAAAAAAAmJaAAAAAAAX14QAAAAAAO5rKAAEBIAB4AQEgAHkAUF3DAAIAAAAIAAAAEAAAwwAATiAAAYagAAJJ"
|
||||
"8MMAAAPoAAATiAAAJxAAUF3DAAIAAAAIAAAAEAAAwwAehIAAmJaAATEtAMMAAABkAAATiAAAJxACAUgAfAB9AgEgAIAAgQEBIAB+AQEgAH8AQuoAAAAAAJiWgAAAAAAn"
|
||||
"EAAAAAAAD0JAAAAAAYAAVVVVVQBC6gAAAAAABhqAAAAAAAGQAAAAAAAAnEAAAAABgABVVVVVAgEgAIIAgwEBWACGAQEgAIQBASAAhQAkwgEAAAD6AAAA+gAAA+gAAAAP"
|
||||
"AErZAQMAAAfQAAA+gAAAAAMAAAAIAAAABAAgAAAAIAAAAAQAACcQAQHAAIcCASAAiACJAgFIAIoAiwIBagCQAJEAA9+wAgFYAIwAjQIBIACOAI8AQb7c3f6FapnFy4B4"
|
||||
"QZnAdwvqMfKODXM49zeESA3vRM2QFABBvrMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzMzM4AEG+tWede5qpBXVOzaq9SvpqBpwzTJ067Hk01rWZxT5wQ7gAQb8a"
|
||||
"Yme1MOiTF+EsYXWNG8wYLwlq/ZXmR6g2PgSXaPOEegBBvzSTEofK4j4twU1E7XMbFoxvESypy3LTYwDOK8PDTfsWASsSZn08y2Z9WOsAEAAQD/////////3AAJMCAswA"
|
||||
"lACVAgEgAJYAlwIBIACkAKUCASAAmACZAgEgAJ4AnwIBIACaAJsCASAAnACdAJsc46BJ4rulpzksHMZaJjfdtBExV1HRdikp9U7VlmJllrEaW2TYAFmAXnBlZIRH4Sqp"
|
||||
"CbKkE6v60jyawOEYfVWJDgHg5kDaLMWq7kWQy6AAmxzjoEniuRloX7kgG9FNmRyw/AB/KERuToZdY5v8AHv9JJ8bCIKAWYBecGVkhEt/mk7tOEXbKUWuqIz/1NliY9sm"
|
||||
"KNHFQimyb79WXudTIACbHOOgSeK0/SaSD6j2aEnWfmW/B7LOQBq2QiiBlnaLIzfq+J2HM0BZgF5wZWSEWPYUSh0McOyjsLL8prcsF5RNab+7jLN/5bOme1r98c8gAJsc"
|
||||
"46BJ4rT4ptGRb52wRyHzhe/A8y/IQOC/W5R5aC6/l1IM4f/EgFmAXnBlZIRmDW7+WN70SpQsfX5DetODFOpW6zjCBx7cDf6E+rEipKACASAAoAChAgEgAKIAowCbHOOg"
|
||||
"SeKqqZCAjJ16vfAa2GI9Dcp/I9zBTG2CwPqbx22lq00uLoBZgF5wZWSETeqWp7jqIGPuCYnPZSlQ1fMuSS4e1gF/i9uIeD8GEkNgAJsc46BJ4rugeQAFCtwRUJhvWRbx"
|
||||
"smlpXTdXCio8SJSBdH/6VPCkAFmAXnBlZIRQPeE6JpjzEwkPI2mvCM1sDTcny96f2dhZ2DcBQmmywCAAmxzjoEnimDpTGClVkh/V+/mJmKVKEpdp4MvFgP5onw6saJRD"
|
||||
"QApAWYBecGVkhElWAHSIgIhlXt+lUyQjmndd50temeILBd7WJwjjWBeIIACbHOOgSeKtcjPEr2gq3gMraY11K9Ikv1SPcVaj3veDWrY1o4nxKcBZgF5wZWSEabqKQLtX"
|
||||
"PIkaYDaKvupB8EOxFDWpuMaJJVqafjw4h4sgAgEgAKYApwIBIACsAK0CASAAqACpAgEgAKoAqwCbHOOgSeK8POt5lMj96a3WrXWw7peFtWWh5oi9wsZqXRsrnHM4eoBZ"
|
||||
"gF5wZWSEXlJk0ILG3LG9zsmxXf+r2OTayqr9FSKLBt9LJAow+aBgAJsc46BJ4qjb23m1w/0EvFl179XCQUUMk32z0kjSh+t6V2jnnqeFwFmAXnBlZIR2KWk8cqZgC06K"
|
||||
"AphhfzE3VceQWtppAGEbybk06szO9KAAmxzjoEnihVEG74vb19K1l5o8WtWa0dH/gTPfytoA1LsVXR3ztfgAWYBecGVkhEVHN0AzKnDpKLX5P7Tnay/Ogc4rxeoks/yh"
|
||||
"U3aWhEnGIACbHOOgSeKNl8PpsnZjGIy1CTzi01K8MhvQAEhGlzUDwj2ACC/yFUALGRulQuFOdHw2ulDcYktF860U0mFOYFaQPC7MVNbEeSsk45C9tSPgAgEgAK4ArwIB"
|
||||
"IACwALEAmxzjoEnivAzuiTw+hkcXtw4XyJGYavfPayk6ehceV8FqrxrzKbQACMou1fGNuRpwF6ilPaS03+BSsz0YID1gpIkGozQp7gRFcQsyZFvVYACbHOOgSeKsoYF9"
|
||||
"T9f0ArrtFxbViCRmpw2DsDzrllY35uHzP9DEosAICQwVUUQOx01jZ84Uy8ccqQ90Ml6tj5Sw14wOK055ds2sYSPy532gAJsc46BJ4piyhqkrUrk/KUOony6llV0S+DnZ"
|
||||
"xDLdccZzKJ7bV+XiAAeBJKPSjdajMGMdZwRvewwnwsyc/7uHN718Pd8cHn7VQG1i9BJSeaAAmxzjoEnihY8aTVKeJnW4JHbfVPfkJwElQXxxqG94pNWmN6n9I5jABA51"
|
||||
"90xtZChBtmQcmPHlOmtU6aLeZ+HBY7/jW6AMz26cNcymYyIuIAErEmZ9WOtmfXULABAAEA/////////3wACzAgLMALQAtQIBIAC2ALcCASAAxADFAgEgALgAuQIBIAC+"
|
||||
"AL8CASAAugC7AgEgALwAvQCbHOOgSeK5Nyl3TF7AOD2UwhNOh+y3h9P5e0emd2zjffbNatQR1EBS4qdSDsPAZjIVSudNcsvyCAIbiOyNPYmj/MJG5lMjVLkYt4TIEDCg"
|
||||
"AJsc46BJ4q0qr9PzfnnT+A41FG5Owo+9L+LsuT6PrQkuoR7XsLMzgFLioMqMr4sLf5pO7ThF2ylFrqiM/9TZYmPbJijRxUIpsm+/Vl7nUyAAmxzjoEnisgCK09re8agW"
|
||||
"Ee8S6q329jm1WbZoHBHjO9oP0q3qItiAUuKgyoyviwfhKqkJsqQTq/rSPJrA4Rh9VYkOAeDmQNosxaruRZDLoACbHOOgSeKeKPVNUBZ96hhTOP8lp1kiAm2wfuT0HIxn"
|
||||
"lw/0cyISP8BS4qDKjK+LGPYUSh0McOyjsLL8prcsF5RNab+7jLN/5bOme1r98c8gAgEgAMAAwQIBIADCAMMAmxzjoEnip+PTCe8vsapzyPHm88uO5qKBwt9yvn+S6aJW"
|
||||
"OlcBqeDAUuKgyoyviyYNbv5Y3vRKlCx9fkN604MU6lbrOMIHHtwN/oT6sSKkoACbHOOgSeKwOTDV9phg7jYWvy7bbTD8N773bX9y1P7lxC7vtvdbvsBS4qDKjK+LDeqW"
|
||||
"p7jqIGPuCYnPZSlQ1fMuSS4e1gF/i9uIeD8GEkNgAJsc46BJ4opGGis7tEqqLAW2742I2ugw5S5lFxeYpc4D9f/qbOMhwFLioMqMr4sQPeE6JpjzEwkPI2mvCM1sDTcn"
|
||||
"y96f2dhZ2DcBQmmywCAAmxzjoEniqGUvGQXdvzVXTq/g3DpDkom5aqVipETXzq2o+FZdGDfAUuKgyoyviwlWAHSIgIhlXt+lUyQjmndd50temeILBd7WJwjjWBeIIAIB"
|
||||
"IADGAMcCASAAzADNAgEgAMgAyQIBIADKAMsAmxzjoEnihA6ouVC73YehzpHoNBKL8q3Gp4YbwxOBhJdxpNWePHwAUuKgyoyviym6ikC7VzyJGmA2ir7qQfBDsRQ1qbjG"
|
||||
"iSVamn48OIeLIACbHOOgSeKr2ACjLl9IlajrtDqvMLD+lfOMRQvmZAaL2NVDooVPYQBS4qDKjK+LHlJk0ILG3LG9zsmxXf+r2OTayqr9FSKLBt9LJAow+aBgAJsc46BJ"
|
||||
"4oohDH+XJf2EoPKNkp+gv/WG2UonjUWXV+B/IvWUldUuQFLioMqMr4s2KWk8cqZgC06KAphhfzE3VceQWtppAGEbybk06szO9KAAmxzjoEnilP2IvoMbkK7LwTeBBX8u"
|
||||
"dYI608SRo4nDIg7XUWQf2CYAUuKgyoyviwVHN0AzKnDpKLX5P7Tnay/Ogc4rxeoks/yhU3aWhEnGIAIBIADOAM8CASAA0ADRAJsc46BJ4qS3beCYCuu47Ohag9xU5wk6"
|
||||
"/1uLtI/5NZ+VaqSyKsGdAApHFgZLFGK0fDa6UNxiS0XzrRTSYU5gVpA8LsxU1sR5KyTjkL21I+AAmxzjoEnivJI7eg6kFGx7dvMX7Xzoog/s5cwHxrcfec5z8/aP/8kA"
|
||||
"CFtq86KYH4dNY2fOFMvHHKkPdDJerY+UsNeMDitOeXbNrGEj8ud9oACbHOOgSeKlwkl68jfkl6kGCq/tElh6bM85sFBPnt7exnkRJq68iQAG+mnlyjEXYzBjHWcEb3sM"
|
||||
"J8LMnP+7hze9fD3fHB5+1UBtYvQSUnmgAJsc46BJ4oYswn2e5gWf+Va6NJ+K8sfz4qIHmVG2ryktqCkE9P8hQAPDhRot06toQbZkHJjx5TprVOmi3mfhwWO/41ugDM9u"
|
||||
"nDXMpmMiLiABASAA1AEBIAD6AQsAtb0+sEAA1QIBIADWANcCA8H4ANgA2QID4fgA+AD5AgEgAPwA/QIBIADaANsCASAA3ADdAgEgAbgBuQIBIAGQAZECASAA3gDfAgEg"
|
||||
"AOAA4QIBIADqAOsAQb7edpH5xbuqiZNqTG9H7flTOIfNiYtDxI5AH4T6G4tcVAIBIADiAOMAQb6U4RvTn2B6e+8nmlEv/eZoRz1YKr3qyDudETjcrMFgKAIBIADkAOUC"
|
||||
"ASAA5gDnAgEgAOgA6QBBvgukN4cHaqlFuawJv/TGaxhU3HU2B5iu8cZPVMOseQOgAEG+K7U1xAKEqaBEZoqjpyAnvSx8Z9jfPTeAR/anR5axvmAAQb4tEpbKJaulevOY"
|
||||
"XQPqlmgiMgHDU6C6X7KRxpFyzPf0YABBvjbzLj0Z1oudyhyW/QhJ0OUxRj9zEM8Y1YUI9Py3ga6gAgFqAOwA7QIBIADuAO8AQb4JmTypqySHVMVJMHWspb3xrs2Lrdy4"
|
||||
"eJ+M7QxpbS4cIABBvgOb8O+4IZEUWqtnRGQ8JpMkMBocpZyk/do3d/9MYnVgAgEgAPAA8QBBvqQeZ13QP0lszxNKt380fCWuaV94vwC/bfuqmrlg1/fIAgEgAPIA8wIB"
|
||||
"IAD0APUAQb4G2ph6AS/mD/+cIv4aIYm1z5jAgCW/TTDEr72ygXOP4ABBvhBZkdUWyc1zdg9Fhp9QSsWD+LSyXChKLJOiMF3rVNqgAgEgAPYA9wBBvhsYuojZc90oYnM2"
|
||||
"WQ+c6cHdiTDRBD2UgxkJlbkZa+mgAEG9wBVbqgGsx1Pog5dkmDyUl4VIe1ZME2BEDY6zMNoQYsAAQb3R4obtqmXfb1H2NxdElqeDuWD4d+Y73ozNJ7dE4jGfQAIBIAHw"
|
||||
"AfECASACGAIZAQPAwAD7AFWgESjR4FjxyuEAXHMvOQot+HG+D9TtSQavwKbeV09n3G92AAAAAAAAAH0QAgEgAP4A/wIBIAEcAR0CASABAAEBAgEgAR4BHwIBIAECAQMC"
|
||||
"ASABEAERAgEgAQQBBQIBIAEIAQkCAWIBBgEHAEG+tp/96j2CYcuIRGkfljl5uv/Pilfg3KwCY8xwdr1JdqgAA97wAEG99o5GkuI7pwd5/g4Lt+avHh31l5WoNTndbJgd"
|
||||
"dTJBicACAUgBCgELAgEgAQwBDQBBvgIKjJdXg0pHrRIfDgYLQ20dIU6mEbDa1FxtUXy9B6rgAEG+Cev2EcR/qY3lMYZ3tIojHR5s+wWySfwNg7XZgP23waACASABDgEP"
|
||||
"AEG+fZGfOd+cHGx01cd8+xQAwUjfI/VrANsfVPw1jZFJhTAAQb4y2lPdHZUPm695Z+bh0Z1dcta4xXX7fl6dlc2SXOliIABBvhfW5EoZl/I8jARohetHRk6pp1y3mrXR"
|
||||
"28rFYjHHtJCgAgFqARIBEwIBIAEUARUAQb4zE+Nef80O9dLZy91HfPiOb6EEQ8YqyWKyIU+KeaYLIABBvgPcWeL0jqPxd5IiX7AAYESGqFqZ7o60BjQZJwpPQP1gAgEg"
|
||||
"ARYBFwBBvofANH7PG2eeTdX5Vr2ZUebxCfwJyzBCE4oriUVRU3jIAgEgARgBGQIBIAEaARsAQb4btDCZEGRAOXaB6WwVqFzYTd1zZgyp15BIuy9n029k4ABBvimf97Kd"
|
||||
"WV/siLZ3qM/+nVRE+t0X0XdLsOK51DJ6WSPgAEG+CQrglDQDcC3b6lTaIr2tVPRR4RlxVAwxYNcF+6BkvaAAQb4mML93xvUT+iBDJrOfhiRGSs3vOczEy9DJAbuCb7aU"
|
||||
"4AIBIAFAAUECASABYAFhAgEgASABIQIBIAE0ATUCASABIgEjAgFYAS4BLwIBIAEkASUAQb6L1UE7T5lmGOuEiyPgykuqAW0ENCaxjsi4fdzZq2D0GAICcAEmAScCASAB"
|
||||
"KAEpAD+9QolK/7nMhu3MO9bzK31P7DqSFoQkLyeYP3RWz5f3KwA/vVaiOV3iXF+2BW0R7uGwqmnXP7y0cjEHibQT6v4MssECASABKgErAgV/rWABLAEtAEG96YUi7d3r"
|
||||
"hTwVGwv/pocif6dNQ6DcZ3JVzvqdhFltQ0AAQb3zT7C1dlWQlR1QmfrLfaGi5Sj94Guq/gLQXakuFmoVwAA/u8n6yK+GpbUUdG9dja4DHHLGGEu5ZXb6rUHFOFMS7kAA"
|
||||
"P7v3dUiUhgaZGC+mdUGyJEzagm0IMNe3d2Q1lCRBTK5AAEG+co6LJmQv3h46OSV3KsT2gWyv6MLPKOrfIXFt86dsXVACASABMAExAEG+KQF+kzAAZybpH/1z1zYof09W"
|
||||
"YAAY6MbQHDj3AO9dCGACASABMgEzAEG9xJZFhUbajV1FgRPu0X8LSHY3DIBRmI4wC6uLpNG5lkAAQb3/+UXNzozn7Eb1PsCLs8NaD2VhG+9qBBlvLJG76KkTQAIBIAE2"
|
||||
"ATcCASABPgE/AgEgATgBOQIBYgE8AT0AQb5l6UC6/ZmwRTHlWwthzsJcYx+8Vj2vmom9/nu617FmkAIBIAE6ATsAQb4J64Df7Vfb8/jmlGnsZByGAdCsEWA/FfWXyVEU"
|
||||
"5d6CoABBvhv0Q/VEAfHxjnYRJRxb6xtGetqoO1OgjstzC/3Ok41gAEG964EWqVOQS0JWHUcxnAz6STWs7+BsROmocJCo+xmqe0AAQb3vR9oRALXcwLQPRb70F/gP7SAV"
|
||||
"WqyMgCIasOqw+b47wABBvpbvxWd5+q2vJUVqR9AlbEIfdFysLR0PXGgVlBf8x5hYAEG+j9bgcxjKxRmfMrJEC6BbHTCQ+WNXqC3H+z591gZw0AgCASABQgFDAgEgAUgB"
|
||||
"SQIBSAFEAUUAQb7KkreZXaSZXSPGxbgwuJddzpWJly3MFNYwALkyQcIdDABBvnLW0BTZocy0D6h48ehPtgqA0XqNxrqB86bTTks9uvuQAgEgAUYBRwBBvjYzcOXWIfyk"
|
||||
"HqSDt3m92Hacz/XRoWD5F4yy0AQ/E0ogAEG+AShOVhiiJZ6Itzjs8O75CiiF+eXloz74MSVsHpPAMiACASABSgFLAgEgAVABUQIDeuABTAFNAgFYAU4BTwA/vVuDIbt9"
|
||||
"1w2Z2FpLSOsyAUPo2ovei28SxaHKDSUdRz0AP71qm4D4evL40x1qJi6AGLh6oOBtxFr5bgc8Xr8jaeWRAEG+HzK7ymUhDh5PL//pLHqwaYidq3sym7hIWC32Rqol+mAA"
|
||||
"Qb41DOvSox2jnjN40ZFtUSQhSJMCyEWhBRdRERRSltibIAIBIAFSAVMCASABWAFZAgFYAVQBVQIBIAFWAVcAQb3cHJ+brtBSsROnSioWNJqFxZ+5hIGX7ta5KuhleBFn"
|
||||
"wABBvf/lQA5TJrGDmv6EqacNl5j6ktTzbQOEGqpl45xcekNAAEG+Nve9GdRJhn/t0fgYe7d1pkTBxa2AfiXcWeRYqE1K3yAAQb4jrXHoxDyh1ZYGBdBoQgLaScxW6pZR"
|
||||
"1hEhJC8BqF+5IAIBIAFaAVsCAVgBXgFfAEG+CdErMSfFYmEK9J9XimJDXyszQjtVELtHIXQt7AvQjKACAUgBXAFdAEC9ivFB4bA7PAP0VXnTs784TO/4CoWLb1QqRdyr"
|
||||
"0orLAgBAvb5z8xm2yt/HlB1G9TB2Qna4rVgzGxI/n4z3UYr3a7gAQb3f0PQO3/nU5ypuXD5/SaZboj2RhZjd5z47o7VM8AjDwABBvfGIqWXxgi7mCltWrYf4pQa2aRZP"
|
||||
"FvMA8LBV1hmpauDAAgEgAWIBYwIBIAGAAYECASABZAFlAgEgAXIBcwIBIAFmAWcCAVgBcAFxAgFIAWgBaQIBIAFqAWsAQb33dj2qlHUSOf2DkiVrVwhcqy3SkE9YbBfn"
|
||||
"zU07vK+uwABBvdxiQ8Yt/Lb9BztkNe9dyXuUyTOcKJRlF9BteI2LK99AAgEgAWwBbQBBvjxAsXZAtTQoMwJV27nrzNCyFum1aU1fbygeFMFuYX9gAgFIAW4BbwBBvdro"
|
||||
"odCnIayUb5VXYFh23qJGAE4Oed7iqqU/L0iFAPpAAD+9QlUpU0rFnXRmWi3ZnIsFtIIm3JDSdtVPEGqGefBt/wA/vWGl+1GrGASEj3GaAizvMOXDl69yZpcU2YUtCHfG"
|
||||
"jLUAQb4d/oR88TrfAGcKrMn44T3wBnbh3TWVQWr8rVq0bYTnYABBvhpY6fA3+apwMQXdpEMu8s8uFXf+625mtfciMt0dh4LgAgEgAXQBdQIBIAF4AXkAQb5d0CvPvsyC"
|
||||
"ZxuTbUe5O2PtTudCwtgc3Ou4DMuX2WizEAIBSAF2AXcAQb3BrlEdo+Hw0uZZJxCgCdxWs/njs6bTHuprY7HtqNl0QABBvcSsc0L20So00ByQZ2oo0aUWf4BlreuHcpYk"
|
||||
"R/C5Av7AAgEgAXoBewIBIAF+AX8CASABfAF9AEG+ErNElODwkPB+KvEKqCtCz8CS5HCcsC8/VoJGV5f0+uAAQb3FCW/Cy20jtvAS0j4k9eQvRg9tcpaQgFnHc5cB7Fdv"
|
||||
"wABBvc5nMn9h2c6FeqzonvA74SwaTxZXTgLEXOKOIFOki9BAAEG+NkNRDvICKDQNaqBlpx1LnSn5qpShA00BPg8Tfv+LHaAAQb4+0zsN9j+Lxs1EvbGG0fMwbeeqbWlx"
|
||||
"TzyjV4LE+0uJYAIBIAGCAYMCAUgBigGLAgEgAYQBhQIBIAGGAYcAQb5O+6O6Y7dWb4HOnMBK4fZ7QNo9woEzBIeKd5+K08xlkABBvlwlLor18dZ5/O3AomXxI5hxYM4o"
|
||||
"J1Xrrx0JChLVxHpQAgFYAYgBiQBBvn9hAM+g43TTR8vOvZfnhX3kPBCgPp3T0+YF+Ai6RFHwAEG99KmZCgwzysLzIR2TNaJdbyX4lKduOMlCmhCp4L9gJEAAQb3Ntnmm"
|
||||
"W4yzmAdiAYg7sNjoD8sCiWIvgvkpuYpTXcyiQAIBZgGMAY0CAW4BjgGPAEC9hzviVxD170gIZfsWPGFKfbOB6LCP5YhH7I7fWz7wdwBAvaey9kbu3gkPDYYEraB8b3sF"
|
||||
"UrCgg4ask3C+O8UJ1mkAQL2wAL6FGQaCTbDdEwGUJ82TDpVMLoNr4ZGZWxcofghZAEC9lqzgehIXoMRj58vAWaHnNAi6UXEU5Ce942dJqf4HawIBIAGSAZMCASABqAGp"
|
||||
"AgEgAZQBlQIBIAGkAaUCASABlgGXAgEgAZwBnQIBagGYAZkCASABmgGbAEC9syAieemf3vF3umY0lCaQxLhwvbTFuL8eQxPYrpeZ8ABAvbl6reyIsCKH2fq2I8+oEnkS"
|
||||
"4xYy3RUH/7ka152WrisAQb4CJHgAcs+wQzgf/9IPKdknw/ej0Z+Q+n3BtSEKi0hIoABBvgqovnD/owP5nsA4G62765H5klOyA1TV+7jriGf2CtjgAgFYAZ4BnwIBIAGg"
|
||||
"AaEAQb3dAG8Nta3/iYiTymgGxV0CfKQlN6UlidHeNgbvtMT9wABBve7An2cFgShRoZx3xA7hUDRtwbcLae0x4dPQQlAH8o3AAEG+HDeG9ZNvkzq3wDDpGt0cb5cHHFQ0"
|
||||
"itHD3s5R2YHy8eACAWIBogGjAD+9ewqjet2JVaCzHa8NXfnW3ZtLEzEASpk9eicyztCrvwA/vXDzaFNMjF1BnqMojulsIHfT2Dj1ltCTVvoe8wu+GKcCASABpgGnAEG+"
|
||||
"un2oV7CbmRhYGc7tLiCXj/L40+4ZlzvlmEnZPxyuQrgAQb5ElmikSUchX0lT+0ASVhwF0OBnUB8X4TD4m4/v2Dfl0ABBvlBR7mcUQO8IfN+DkkDYHF1reSJZhv08w6k+"
|
||||
"JIA6ITiwAgEgAaoBqwIBIAG0AbUCAVgBrAGtAgEgAbIBswBBvhX0m4apMW/GEDxtnd+z0ug75voHd+OibSQbA2+tUPigAgEgAa4BrwIBWAGwAbEAQb3WKikPb9a/J2ti"
|
||||
"V6yOhNUW5BivimV3gM+EI3VAxst6QAA/vUeSH4ZL+7V8eQBEF/0lm/ouIJ+wQs5QTzBpsSHSXLcAP71t4YT+jYHLpx5Gv3HFoOzL5rhg0Ukud8G3adF8AYlRAEG+Zf0n"
|
||||
"TrwaPPTPlLjegNsGkoz7UV5wz7oYQet9+SNmRfAAQb5m0tqyXFYp4ntucDLTwJV1gxwoh6JoJL1Y0rfwfLQhUABBvqSCHVak+jIc9ANutTAfHpZNM3YdGky7yaDzsTrg"
|
||||
"0WhIAgN9eAG2AbcAP70AGCAXHtaQJNqiST0rNTs8mUZSo5H6vM7gvA+3q7+iAD+9FgzFlOZUrfRtonCQzjDSFzrRv4l/94TFs9oi+RQ6kgIBIAG6AbsCASAB1gHXAgEg"
|
||||
"AbwBvQIBIAHKAcsCASABvgG/AgEgAcQBxQBBvqg93lUVxmlCEks5kL8jTFcqg8lElfAi8dSee8j2jFDIAgEgAcABwQICcwHCAcMAQb5gqEQiOqBKE6++9fJCR6LRVtNC"
|
||||
"cE9MFknXFlF0leXQMAA/vWDgwPyHRVDvZl2iYgjJ3nWePRW2wjoUWAxrbgzB5a8AP71vi5ua8R9Xas7ZJOxnHw9u9q/5yyOmKiac4YXhpzZdAEG+s1A7ERdFjokIunFC"
|
||||
"SgeOxki+V8FwbGaF2nFzHDuF3TgCASABxgHHAEG+VoZmB1FqSlGFLPm5r9LBLAX67F6BFQLDlwahNArjz1ACAnIByAHJAD+9QiJtY3MezTL7KB0xvFikeKH4EL/XSXL0"
|
||||
"b7P1FoVCXwA/vWinW8a2SNxgyMi+e0ML00BiBRy4kZh/JQrAHMZZ3Y0CASABzAHNAgEgAdIB0wIBWAHOAc8CBX+rYAHQAdEAQb4MUGwt25IQd3/yHjI03F71G8Kp2GMa"
|
||||
"MEv2TiWoTKbs4ABBvjfgYNaJyJijra4RuhLyyPeGUpRcBZhwzdStzQ2MIyDgAD+8XsswC94XkGKDsoUR3B73WxXRX2LdrWSok77uwX/c8AA/vF/xbT+aFbepxFKzgZQ9"
|
||||
"HbF9uy1KEVspm2/20klhldAAQb6ORoMEHrkmcAR+9ntDkAj0Hq6gLGUT0ceglU8Tm9jfuAIBIAHUAdUAQb5A/TMaqnaKx2BBvcxafTpwUxZYRXcKXTAZj80OapRScABB"
|
||||
"vm8iGJqmHDhbx34EGjoh2YHhU4mpC/HVkmnz7NBQA0LwAgEgAdgB2QIBIAHmAecCASAB2gHbAgEgAd4B3wIDeqAB3AHdAEG+rC9orZ39Jto92k4zrR5989Z4qySyANXA"
|
||||
"U8TLG5+0zfgAP71bgmShTXyEATbw0sECEmtwNtuzKI+S3DHEAPCPRhvTAD+9YC74p2ZuEIcz5A4sE69a7MTFuARvrmQnzUDgc7Mo3QIBIAHgAeECA3jgAeQB5QBBvlnO"
|
||||
"v0cNQ7XgFJEwo9boghCVUHzfZ+urQtJh6esRW5xQAgFqAeIB4wBAvYY1sTf2ZnuWrkRZ+aijWbaH+q5ZMHkghn/Ys+tCZhoAQL2mLfoqMZw77ln7oAn0Cna+Bkp/snNw"
|
||||
"xHgR2MTl/uqVAD+9XiSecyAvpnbNK3Z28HAfLhXvbXN59PmK+A7M2VDdAwA/vVcEpETq6AblfmVHtN91B7GNEyGglVc2447ooPciTZMCAUgB6AHpAgEgAe4B7wIBIAHq"
|
||||
"AesAQb5J79ZyWgm+nqrXs6x0I4wkPiKQBH28C7RWNfPTqAfu8ABBvga7i8W/V7fCfyaKf+LLs48ld6A5hMVDltkVnlrlk+IgAgFYAewB7QBAvZIZkLzw7YHDbLe+Scl6"
|
||||
"3uhdXfRwOUa0JHwJvuhGG3kAQL2a+QtRGkljjF6hjiME0j7LnnMjJkDh6mYBahv3SgufAEG+q3Z1cONnEXUOq6coX7x0RaK8l2WJj/QViIJee2G6qcgAQb6p4a4p479A"
|
||||
"eC04K9HUR0x8B9TDrIBoSgVyWXe7xEjGWAIBIAHyAfMCASACBAIFAgEgAfQB9QIBIAH6AfsCAUgB9gH3AEG/JvWFCk64ubdT7k9fADlAADZW2oUeE0F//hNAx5vmQ24C"
|
||||
"ASAB+AH5AEG+ortA8RL/qsRfVCCcmhh9yV+abEsHsmRmSDIyM5jiKZgAQb52rnetuJmLxwetwRXlQ8SwkzMrIHn9f1t+3vxypn8ikABBvlRRrWQUSUCo75+dTtj6fP1U"
|
||||
"VTmV5DEujv1TIAc3ZLZQAgFYAfwB/QIBIAH+Af8AQb6OgDPbFGfKzqixWPD2Hmgt4G6KWUdQTJBPH3A9K+TZ6ABBvoMGKypw006AeRYqimLjmY2Ufp+SHk8C0ZJBNgVB"
|
||||
"lzw4AgFqAgACAQIBWAICAgMAQb4FNJ5NJO4+0QwlVAWckUZXdk+PfYDexDZ1+ju9SxhF4ABBvjxQpfN455vPpJ/T+t2rtlKCE9X6KviHFRV802gCPe5gAEG+eMP12XnW"
|
||||
"n0wTl6XmbgClnjYFM2JY2UAZYhUaknKJf3AAQb5WLKPfVeykQ1NoeXCT+51aWRbOsYTKmyd3AQSzEZ39EAIBIAIGAgcCASACDAINAgFYAggCCQIBIAIKAgsAQb68pxxy"
|
||||
"oAcWOvpflv3VjfgrRk9v44uazdxMziPqfc1hGABBvqK0CHqoBidcEUJHx4naV3TtgmUv1oEhGpt3DFLGnncoAEG+xnddXOiUNI6DJEK4qY1Cxoa8Hl6iQkWXMWUwTPTo"
|
||||
"H6wAQb72G1Ke4q6X03mCI87z+qVMO/gd+xvXv6SSwdWpfbnvjAIBIAIOAg8AQb8B8+e/xOcnn+D3yL8SGkEf/SXAx3pRSH/Lf3UDC6zxGgIBIAIQAhEAQb7an34AE4Mg"
|
||||
"4PeqZAW6F6j/JbgFl8egPBFDGYC5dIgrvABBvpMd78gzSiVsK0zz0AHtEja8x1UoB/NDZMjn+l86NQK4AgFYAhICEwIBIAIUAhUAQb4zj6RBc4mQ6p3ng7mGJ7tp7Mbz"
|
||||
"ERhe7obkM9A0wnCCIABBvcdlWZEG0Xj7uGgLfagzT4G4zmtS/JDEdPQBzOA0r99AAgEgAhYCFwBAvYD00VNmocZyrS8LPuogdwJgYw9wWC7QCKaicnWos7IAQL2UR4JV"
|
||||
"cHfZibOIOqdJm+OTPN6Z1z0bykKu09Up+xc/AgEgAhoCGwIBIAIoAikCASACHAIdAgEgAiYCJwIBWAIeAh8CASACJAIlAEG+pJiW3Qo4nq8pKjVzzfs3/0uJxMmWXYyD"
|
||||
"sduLHtuy8ggCASACIAIhAEG+VOzUzgqzn6yjJdPd2lOP2LQqiZF7O2/LbcmLzMf+hfACAnICIgIjAD+9bmuGAYNACsk0M2FDu866cYUghqLilNK52oLflBoKXQA/vU+c"
|
||||
"jkDnrb+NojfOEJpwm2m9hlmHmr3HOWwyl4LEIcEAQb7xrpmUHCzHHfaaDbiK66LDRKeKblhi4QoTVRthJ2OzbABBvu6d/bOGE/iiKiKq5AGCvcetA3Izw45ihY196+ey"
|
||||
"/BbcAEG/IPVJM6fGP9OC+PczMUdiKPNfwkUrt4eslgzXXEY0qCIAQb8FwRfn4LbYMTzpLsSBuEI3vAaLitADflpdxp+M5JVWtgIBIAIqAisCASACNgI3AEG/OXz/ktGT"
|
||||
"HClb8arzLt3XEjlJTw9LEYxjGvSJNff79loCASACLAItAgFIAi4CLwIBIAIwAjEAQb5bNqQnT8GAdHDnixf9NzTB5VYvmnvaYs6m53KwbxMzsABBvlGslmQWFAphVxFA"
|
||||
"GGIJvfuk/oBpngdzy0sJ8WxmWNSQAgN+ugIyAjMCAW4CNAI1AD+84Hccb00HqhGM3lRQZIZ3QmOuWlRDBQ9+uXRKu1L+hAA/vOLc2o+R4+ofOAQzeQiU06F6MN1nTGWW"
|
||||
"J0eurH869zQAQb36Q2nDRQfZx/XsGJ+z0zYtk4S6OXPZcUASOm420y1FQABBvd9bukINCpKmNEXeA+ve7Mnhp8WSt+MPJFDCUYjDLZ1AAgEgAjgCOQBBvzD0lLSsv1Pi"
|
||||
"WQ0jVDajeXFbJ/TkSakvdy+g0TPR27KGAgFYAjoCOwIBWAI8Aj0AQb53taVCRMwrV1sky/EE45BOJoTTJ0d6vkLZIb6j4k+G0ABBvlKuPPc+sdv9ffRS/Kj+bSQKZFE7"
|
||||
"fT/jbtog/5dYYCCQAEG+ZZdBcxF7VCWJS+ti78o7J2qY+aXyKipCl2P0CfXeUhAAQb5gdZIvzW7H8KDz4y1oKMiuAzlXY+TF7PGVAwUvGCn0UAIBIAJAAkEBA6DAAkwB"
|
||||
"AfwCQgIBIAJDAkQBwbnpmKopRu2n8DHZCDhXCHvJdckI7xw0kBvbb0npdd7jjldXaYBVRMxJsrwBE0/IJ4amdSKh5/Ec0+nZhJr583uAAAAAAAAAAAAAAABtiv/XlkR5"
|
||||
"bE7cmy0osGrcZKJHU0ACRwEB1AJFAQH0AkYBwcaYme1MOiTF+EsYXWNG8wYLwlq/ZXmR6g2PgSXaPOEeN1Z517mqkFdU7Nqr1K+moGnDNMnTrseTTWtZnFPnBDuAAAAA"
|
||||
"AAAAAAAAAABtiv/XlkR5bE7cmy0osGrcZKJHU0ACRwLFAaUkEAuNdJLBIqJ50rOuJIeLHBBTEnUHFMTTlSvkBfBlTSx/ArBlJBChmMwsWi3fU4ek+WJDvjF7AhFPUcNX"
|
||||
"4kaAAAAAAAAAAAAAAAAAJ37Hglt9pn14Z9Vgj9pE3L7fXbBAAkcCTgIBIAJIAkkCASACSgJLAIO/z+IwR9x5RqPSfAzguJqFxanKeUhZQgFsmKwj4GuAK2WAAAAAAAAA"
|
||||
"AAAAAAB7G3oHXwv9lQmh8vd3TonVSERFqMAAgr+jPzrhTYloKgTCsGgEFNx7OdH+sJ98etJnwrIVSsFxHwAAAAAAAAAAAAAAAOsF4basDVdO8s8p/fAcwLo9j5vxAIK/"
|
||||
"n8LJGSxLhg32E0QLb7fZPphHZGiLJJFDrBMD8NcM15MAAAAAAAAAAAAAAADlTNYxyXvgdnFyrRaQRoiWLQnS/gLFAbUl61s8X25tzWBr7nugeg7IMDUhKEm34FWUmcD2"
|
||||
"utVNIR8VdL9iPRR4dwjF/dVl4ymiWr+kkJXphEJvGbzwSXSAAAAAAAAAAAAAAAAAWZG0lbam3LV4+pciTNFehvbNeeLAAk0CTgIBIAJPAlAAMEO5rKAEO5rKADehIAPk"
|
||||
"4cBAX14QA5iWgAIBIAJRAlIAg7/T7quzPdTpPcCght7xTpoi+g9Sw7gtkYDSyaOh0qHc0AAAAAAAAAAAAAAAADavGw+/CvXTnyDIJ6fZU+llAiixQAIBIAJTAlQCASAC"
|
||||
"WwJcAgEgAlUCVgCBv1wad2ywThLttxU0gcwWuSJSuLNadPm8j3J85ggRzjkGAAAAAAAAAAAAAAAB1xLrLNteGQzkOClxdvv3E/l3M5UAgb8JuDCFQxifbIdTfjd1x7Mq"
|
||||
"S+Z7dzIUkHtIdVjcVeFT2AAAAAAAAAAAAAAAAiwal03Yl9B7p2fVDSCtlYsZX6m+AgEgAlcCWAIBIAJZAloAgb7jxvbib0yb3DKvQBDcHL/hdg7NjCuqjUQ09t8hgmhV"
|
||||
"oAAAAAAAAAAAAAAABEGpMZGoNId5F80sBzWgnjo+AP2UAIG+sE8ccijAbmkaBJVfyfgqY5pf4QSO+c5IFGVC9WwlY/AAAAAAAAAAAAAAAAeg08QveVui23B9QhrdMd7a"
|
||||
"nx/sGACBvqxwYOyAk+H0YGBc70gZFJc6oqUvcHywU+yJNBfSNh+AAAAAAAAAAAAAAAADFU5kDFbQI6mIkEJqJNGncvWjiygCASACXQJeAIG/acxhhr+dznhtppGVCg+k"
|
||||
"FqjL65rOddHn1mwyRj1rYgQAAAAAAAAAAAAAAACRfpTwfZ9v81WVbRpRYN+1/m9YhwCBvw9fhTm/NqURBT4FuwJczZWe39F575hmpFtt8KVniCwIAAAAAAAAAAAAAAAB"
|
||||
"DkxuMKeNKjBZpVAjNVjJ/URzwhoAgb8RuD3rFDyNUpuXtBAnWTykKVAuY7UKLrye419st2b25AAAAAAAAAAAAAAAAlUrmS7Amiwb/77tvRUhnpfLLMXeL4vIgQ==";
|
||||
|
||||
|
||||
constexpr td::int64 Ton = 1000000000;
|
||||
|
||||
TEST(Emulator, wallet_int_and_ext_msg) {
|
||||
td::Ed25519::PrivateKey priv_key = td::Ed25519::generate_private_key().move_as_ok();
|
||||
auto pub_key = priv_key.get_public_key().move_as_ok();
|
||||
ton::WalletV3::InitData init_data;
|
||||
init_data.public_key = pub_key.as_octet_string();
|
||||
init_data.wallet_id = 239;
|
||||
auto wallet = ton::WalletV3::create(init_data, 2);
|
||||
|
||||
auto address = wallet->get_address();
|
||||
|
||||
void *emulator = transaction_emulator_create(config_boc, 3);
|
||||
const uint64_t lt = 42000000000;
|
||||
CHECK(transaction_emulator_set_lt(emulator, lt));
|
||||
const uint32_t utime = 1337;
|
||||
transaction_emulator_set_unixtime(emulator, utime);
|
||||
|
||||
std::string shard_account_after_boc_b64;
|
||||
|
||||
// emulate internal message with init state on uninit account
|
||||
{
|
||||
td::Ref<vm::Cell> account_root;
|
||||
block::gen::Account().cell_pack_account_none(account_root);
|
||||
auto none_shard_account_cell = vm::CellBuilder().store_ref(account_root).store_bits(td::Bits256::zero().as_bitslice()).store_long(0).finalize();
|
||||
auto none_shard_account_boc = td::base64_encode(std_boc_serialize(none_shard_account_cell).move_as_ok());
|
||||
|
||||
td::Ref<vm::Cell> int_msg;
|
||||
{
|
||||
block::gen::Message::Record message;
|
||||
block::gen::CommonMsgInfo::Record_int_msg_info msg_info;
|
||||
msg_info.ihr_disabled = true;
|
||||
msg_info.bounce = false;
|
||||
msg_info.bounced = false;
|
||||
{
|
||||
block::gen::MsgAddressInt::Record_addr_std src;
|
||||
src.anycast = vm::CellBuilder().store_zeroes(1).as_cellslice_ref();
|
||||
src.workchain_id = 0;
|
||||
src.address = td::Bits256();;
|
||||
tlb::csr_pack(msg_info.src, src);
|
||||
}
|
||||
{
|
||||
block::gen::MsgAddressInt::Record_addr_std dest;
|
||||
dest.anycast = vm::CellBuilder().store_zeroes(1).as_cellslice_ref();
|
||||
dest.workchain_id = address.workchain;
|
||||
dest.address = address.addr;
|
||||
tlb::csr_pack(msg_info.dest, dest);
|
||||
}
|
||||
{
|
||||
block::CurrencyCollection cc{10 * Ton};
|
||||
cc.pack_to(msg_info.value);
|
||||
}
|
||||
{
|
||||
vm::CellBuilder cb;
|
||||
block::tlb::t_Grams.store_integer_value(cb, td::BigInt256(int(0.03 * Ton)));
|
||||
msg_info.fwd_fee = cb.as_cellslice_ref();
|
||||
}
|
||||
{
|
||||
vm::CellBuilder cb;
|
||||
block::tlb::t_Grams.store_integer_value(cb, td::BigInt256(0));
|
||||
msg_info.ihr_fee = cb.as_cellslice_ref();
|
||||
}
|
||||
msg_info.created_lt = 0;
|
||||
msg_info.created_at = static_cast<uint32_t>(utime);
|
||||
tlb::csr_pack(message.info, msg_info);
|
||||
message.init = vm::CellBuilder()
|
||||
.store_ones(1)
|
||||
.store_zeroes(1)
|
||||
.append_cellslice(vm::load_cell_slice(ton::GenericAccount::get_init_state(wallet->get_state())))
|
||||
.as_cellslice_ref();
|
||||
message.body = vm::CellBuilder().store_zeroes(1).as_cellslice_ref();
|
||||
|
||||
tlb::type_pack_cell(int_msg, block::gen::t_Message_Any, message);
|
||||
}
|
||||
|
||||
CHECK(int_msg.not_null());
|
||||
|
||||
auto int_msg_boc = td::base64_encode(std_boc_serialize(int_msg).move_as_ok());
|
||||
|
||||
std::string int_emu_res = transaction_emulator_emulate_transaction(emulator, none_shard_account_boc.c_str(), int_msg_boc.c_str());
|
||||
LOG(ERROR) << "int_emu_res = " << int_emu_res;
|
||||
|
||||
auto int_result_json = td::json_decode(td::MutableSlice(int_emu_res));
|
||||
CHECK(int_result_json.is_ok());
|
||||
auto int_result_value = int_result_json.move_as_ok();
|
||||
auto& int_result_obj = int_result_value.get_object();
|
||||
|
||||
auto success_field = td::get_json_object_field(int_result_obj, "success", td::JsonValue::Type::Boolean, false);
|
||||
CHECK(success_field.is_ok());
|
||||
auto success = success_field.move_as_ok().get_boolean();
|
||||
CHECK(success);
|
||||
|
||||
auto transaction_field = td::get_json_object_field(int_result_obj, "transaction", td::JsonValue::Type::String, false);
|
||||
CHECK(transaction_field.is_ok());
|
||||
auto transaction_boc_b64 = transaction_field.move_as_ok().get_string();
|
||||
auto transaction_boc = td::base64_decode(transaction_boc_b64);
|
||||
CHECK(transaction_boc.is_ok());
|
||||
auto trans_cell = vm::std_boc_deserialize(transaction_boc.move_as_ok());
|
||||
CHECK(trans_cell.is_ok());
|
||||
td::Bits256 trans_hash = trans_cell.ok()->get_hash().bits();
|
||||
block::gen::Transaction::Record trans;
|
||||
block::gen::TransactionDescr::Record_trans_ord trans_descr;
|
||||
CHECK(tlb::unpack_cell(trans_cell.move_as_ok(), trans) && tlb::unpack_cell(trans.description, trans_descr));
|
||||
CHECK(trans.outmsg_cnt == 0);
|
||||
CHECK(trans.account_addr == wallet->get_address().addr);
|
||||
CHECK(trans_descr.aborted == false);
|
||||
CHECK(trans_descr.destroyed == false);
|
||||
CHECK(trans.lt == lt);
|
||||
CHECK(trans.now == utime);
|
||||
|
||||
auto shard_account_field = td::get_json_object_field(int_result_obj, "shard_account", td::JsonValue::Type::String, false);
|
||||
CHECK(shard_account_field.is_ok());
|
||||
auto shard_account_boc_b64 = shard_account_field.move_as_ok().get_string();
|
||||
shard_account_after_boc_b64 = shard_account_boc_b64.str();
|
||||
auto shard_account_boc = td::base64_decode(shard_account_boc_b64);
|
||||
CHECK(shard_account_boc.is_ok());
|
||||
auto shard_account_cell = vm::std_boc_deserialize(shard_account_boc.move_as_ok());
|
||||
CHECK(shard_account_cell.is_ok());
|
||||
block::gen::ShardAccount::Record shard_account;
|
||||
block::gen::Account::Record_account account;
|
||||
CHECK(tlb::unpack_cell(shard_account_cell.move_as_ok(), shard_account) && tlb::unpack_cell(shard_account.account, account));
|
||||
CHECK(shard_account.last_trans_hash == trans_hash);
|
||||
CHECK(shard_account.last_trans_lt == lt);
|
||||
ton::WorkchainId wc;
|
||||
ton::StdSmcAddress addr;
|
||||
CHECK(block::tlb::t_MsgAddressInt.extract_std_address(account.addr, wc, addr));
|
||||
CHECK(address.workchain == wc);
|
||||
CHECK(address.addr == addr);
|
||||
}
|
||||
|
||||
// emulate external message
|
||||
{
|
||||
auto ext_body = wallet->make_a_gift_message(priv_key, utime + 60, {ton::WalletV3::Gift{block::StdAddress(0, ton::StdSmcAddress()), 1 * Ton}});
|
||||
CHECK(ext_body.is_ok());
|
||||
auto ext_msg = ton::GenericAccount::create_ext_message(address, {}, ext_body.move_as_ok());
|
||||
auto ext_msg_boc = td::base64_encode(std_boc_serialize(ext_msg).move_as_ok());
|
||||
std::string ext_emu_res = transaction_emulator_emulate_transaction(emulator, shard_account_after_boc_b64.c_str(), ext_msg_boc.c_str());
|
||||
LOG(ERROR) << "ext_emu_res = " << ext_emu_res;
|
||||
|
||||
auto ext_result_json = td::json_decode(td::MutableSlice(ext_emu_res));
|
||||
CHECK(ext_result_json.is_ok());
|
||||
auto ext_result = ext_result_json.move_as_ok();
|
||||
auto &ext_result_obj = ext_result.get_object();
|
||||
auto ext_success_field = td::get_json_object_field(ext_result_obj, "success", td::JsonValue::Type::Boolean, false);
|
||||
CHECK(ext_success_field.is_ok());
|
||||
auto ext_success = ext_success_field.move_as_ok().get_boolean();
|
||||
CHECK(ext_success);
|
||||
|
||||
auto ext_transaction_field = td::get_json_object_field(ext_result_obj, "transaction", td::JsonValue::Type::String, false);
|
||||
CHECK(ext_transaction_field.is_ok());
|
||||
auto ext_transaction_boc_b64 = ext_transaction_field.move_as_ok().get_string();
|
||||
auto ext_transaction_boc = td::base64_decode(ext_transaction_boc_b64);
|
||||
CHECK(ext_transaction_boc.is_ok());
|
||||
auto ext_trans_cell = vm::std_boc_deserialize(ext_transaction_boc.move_as_ok());
|
||||
CHECK(ext_trans_cell.is_ok());
|
||||
td::Bits256 ext_trans_hash = ext_trans_cell.ok()->get_hash().bits();
|
||||
block::gen::Transaction::Record ext_trans;
|
||||
block::gen::TransactionDescr::Record_trans_ord ext_trans_descr;
|
||||
CHECK(tlb::unpack_cell(ext_trans_cell.move_as_ok(), ext_trans) && tlb::unpack_cell(ext_trans.description, ext_trans_descr));
|
||||
CHECK(ext_trans.outmsg_cnt == 1);
|
||||
CHECK(ext_trans.account_addr == wallet->get_address().addr);
|
||||
CHECK(ext_trans_descr.aborted == false);
|
||||
CHECK(ext_trans_descr.destroyed == false);
|
||||
|
||||
auto ext_shard_account_field = td::get_json_object_field(ext_result_obj, "shard_account", td::JsonValue::Type::String, false);
|
||||
CHECK(ext_shard_account_field.is_ok());
|
||||
auto ext_shard_account_boc_b64 = ext_shard_account_field.move_as_ok().get_string();
|
||||
auto ext_shard_account_boc = td::base64_decode(ext_shard_account_boc_b64);
|
||||
CHECK(ext_shard_account_boc.is_ok());
|
||||
auto ext_shard_account_cell = vm::std_boc_deserialize(ext_shard_account_boc.move_as_ok());
|
||||
CHECK(ext_shard_account_cell.is_ok());
|
||||
block::gen::ShardAccount::Record ext_shard_account;
|
||||
block::gen::Account::Record_account ext_account;
|
||||
CHECK(tlb::unpack_cell(ext_shard_account_cell.move_as_ok(), ext_shard_account) && tlb::unpack_cell(ext_shard_account.account, ext_account));
|
||||
CHECK(ext_shard_account.last_trans_hash == ext_trans_hash);
|
||||
CHECK(ext_shard_account.last_trans_lt == ext_trans.lt);
|
||||
ton::WorkchainId wc;
|
||||
ton::StdSmcAddress addr;
|
||||
CHECK(block::tlb::t_MsgAddressInt.extract_std_address(ext_account.addr, wc, addr));
|
||||
CHECK(address.workchain == wc);
|
||||
CHECK(address.addr == addr);
|
||||
}
|
||||
}
|
||||
|
||||
TEST(Emulator, tvm_emulator) {
|
||||
td::Ed25519::PrivateKey priv_key = td::Ed25519::generate_private_key().move_as_ok();
|
||||
auto pub_key = priv_key.get_public_key().move_as_ok();
|
||||
ton::WalletV3::InitData init_data;
|
||||
init_data.public_key = pub_key.as_octet_string();
|
||||
init_data.wallet_id = 239;
|
||||
init_data.seqno = 1337;
|
||||
auto wallet = ton::WalletV3::create(init_data, 2);
|
||||
|
||||
auto code = ton::SmartContractCode::get_code(ton::SmartContractCode::Type::WalletV3, 2);
|
||||
auto code_boc_b64 = td::base64_encode(std_boc_serialize(code).move_as_ok());
|
||||
auto data = ton::WalletV3::get_init_data(init_data);
|
||||
auto data_boc_b64 = td::base64_encode(std_boc_serialize(data).move_as_ok());
|
||||
|
||||
void *tvm_emulator = tvm_emulator_create(code_boc_b64.c_str(), data_boc_b64.c_str(), 1);
|
||||
unsigned method_crc = td::crc16("seqno");
|
||||
unsigned method_id = (method_crc & 0xffff) | 0x10000;
|
||||
auto stack = td::make_ref<vm::Stack>();
|
||||
vm::CellBuilder stack_cb;
|
||||
CHECK(stack->serialize(stack_cb));
|
||||
auto stack_cell = stack_cb.finalize();
|
||||
auto stack_boc = td::base64_encode(std_boc_serialize(stack_cell).move_as_ok());
|
||||
|
||||
char addr_buffer[49] = {0};
|
||||
CHECK(wallet->get_address().rserialize_to(addr_buffer));
|
||||
|
||||
auto rand_seed = std::string(64, 'F');
|
||||
CHECK(tvm_emulator_set_c7(tvm_emulator, addr_buffer, 1337, 10 * Ton, rand_seed.c_str(), config_boc));
|
||||
std::string tvm_res = tvm_emulator_run_get_method(tvm_emulator, method_id, stack_boc.c_str());
|
||||
LOG(ERROR) << "tvm_res = " << tvm_res;
|
||||
|
||||
auto result_json = td::json_decode(td::MutableSlice(tvm_res));
|
||||
CHECK(result_json.is_ok());
|
||||
auto result = result_json.move_as_ok();
|
||||
auto& result_obj = result.get_object();
|
||||
|
||||
auto success_field = td::get_json_object_field(result_obj, "success", td::JsonValue::Type::Boolean, false);
|
||||
CHECK(success_field.is_ok());
|
||||
auto success = success_field.move_as_ok().get_boolean();
|
||||
CHECK(success);
|
||||
|
||||
auto stack_field = td::get_json_object_field(result_obj, "stack", td::JsonValue::Type::String, false);
|
||||
CHECK(stack_field.is_ok());
|
||||
auto stack_val = stack_field.move_as_ok();
|
||||
auto& stack_obj = stack_val.get_string();
|
||||
auto stack_res_boc = td::base64_decode(stack_obj);
|
||||
CHECK(stack_res_boc.is_ok());
|
||||
auto stack_res_cell = vm::std_boc_deserialize(stack_res_boc.move_as_ok());
|
||||
CHECK(stack_res_cell.is_ok());
|
||||
td::Ref<vm::Stack> stack_res;
|
||||
auto stack_res_cs = vm::load_cell_slice(stack_res_cell.move_as_ok());
|
||||
CHECK(vm::Stack::deserialize_to(stack_res_cs, stack_res));
|
||||
CHECK(stack_res->depth() == 1);
|
||||
CHECK(stack_res.write().pop_int()->to_long() == init_data.seqno);
|
||||
}
|
|
@@ -25,7 +25,7 @@ td::Result<std::unique_ptr<TransactionEmulator::EmulationResult>> TransactionEmu
    utime = (unsigned)std::time(nullptr);
  }

  auto fetch_res = block::FetchConfigParams::fetch_config_params(config_, prev_blocks_info_, &old_mparams,
  auto fetch_res = block::FetchConfigParams::fetch_config_params(*config_, prev_blocks_info_, &old_mparams,
                                                                 &storage_prices, &storage_phase_cfg,
                                                                 &rand_seed_, &compute_phase_cfg,
                                                                 &action_phase_cfg, &masterchain_create_fee,

@@ -130,17 +130,28 @@ td::Result<TransactionEmulator::EmulationSuccess> TransactionEmulator::emulate_t
  }

  TRY_RESULT(emulation, emulate_transaction(std::move(account), msg_root, utime, lt, trans_type));

  if (auto emulation_result_ptr = dynamic_cast<EmulationSuccess*>(emulation.get())) {
    auto& emulation_result = *emulation_result_ptr;

    if (td::Bits256(emulation_result.transaction->get_hash().bits()) != td::Bits256(original_trans->get_hash().bits())) {
      return td::Status::Error("transaction hash mismatch");
    }

  auto emulation_result = dynamic_cast<EmulationSuccess&>(*emulation);
  if (td::Bits256(emulation_result.transaction->get_hash().bits()) != td::Bits256(original_trans->get_hash().bits())) {
    return td::Status::Error("transaction hash mismatch");
    if (!check_state_update(emulation_result.account, record_trans)) {
      return td::Status::Error("account hash mismatch");
    }

    return emulation_result;

  } else if (auto emulation_not_accepted_ptr = dynamic_cast<EmulationExternalNotAccepted*>(emulation.get())) {
    return td::Status::Error( PSTRING()
      << "VM Log: " << emulation_not_accepted_ptr->vm_log
      << ", VM Exit Code: " << emulation_not_accepted_ptr->vm_exit_code
      << ", Elapsed Time: " << emulation_not_accepted_ptr->elapsed_time);
  } else {
    return td::Status::Error("emulation failed");
  }

  if (!check_state_update(emulation_result.account, record_trans)) {
    return td::Status::Error("account hash mismatch");
  }

  return emulation_result;
}

td::Result<TransactionEmulator::EmulationChain> TransactionEmulator::emulate_transactions_chain(block::Account&& account, std::vector<td::Ref<vm::Cell>>&& original_transactions) {

@@ -227,7 +238,9 @@ td::Result<std::unique_ptr<block::transaction::Transaction>> TransactionEmulator
    return td::Status::Error(-669,"cannot create action phase of a new transaction for smart contract "s + acc->addr.to_hex());
  }

  if (trans->bounce_enabled && !trans->compute_phase->success && !trans->prepare_bounce_phase(*action_phase_cfg)) {
  if (trans->bounce_enabled
      && (!trans->compute_phase->success || trans->action_phase->state_exceeds_limits || trans->action_phase->bounce)
      && !trans->prepare_bounce_phase(*action_phase_cfg)) {
    return td::Status::Error(-669,"cannot create bounce phase of a new transaction for smart contract "s + acc->addr.to_hex());
  }


@@ -250,8 +263,8 @@ void TransactionEmulator::set_ignore_chksig(bool ignore_chksig) {
  ignore_chksig_ = ignore_chksig;
}

void TransactionEmulator::set_config(block::Config &&config) {
  config_ = std::forward<block::Config>(config);
void TransactionEmulator::set_config(std::shared_ptr<block::Config> config) {
  config_ = std::move(config);
}

void TransactionEmulator::set_libs(vm::Dictionary &&libs) {

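The widened bounce condition above means a bounce phase is now prepared not only when the compute phase fails, but also when it succeeds and the action phase either requests a bounce or exceeds the state limits. Restated as a standalone predicate over the relevant flags (purely illustrative; it mirrors the condition in the hunk above, with the same short-circuit order):

// Illustrative restatement of the new bounce trigger.
bool should_prepare_bounce_phase(bool bounce_enabled, bool compute_success,
                                 bool action_state_exceeds_limits, bool action_requests_bounce) {
  return bounce_enabled &&
         (!compute_success || action_state_exceeds_limits || action_requests_bounce);
}
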
@@ -9,7 +9,7 @@

namespace emulator {
class TransactionEmulator {
  block::Config config_;
  std::shared_ptr<block::Config> config_;
  vm::Dictionary libraries_;
  int vm_log_verbosity_;
  ton::UnixTime unixtime_;

@@ -20,7 +20,7 @@ class TransactionEmulator {
  td::Ref<vm::Tuple> prev_blocks_info_;

public:
  TransactionEmulator(block::Config&& config, int vm_log_verbosity = 0) :
  TransactionEmulator(std::shared_ptr<block::Config> config, int vm_log_verbosity = 0) :
    config_(std::move(config)), libraries_(256), vm_log_verbosity_(vm_log_verbosity),
    unixtime_(0), lt_(0), rand_seed_(td::BitArray<256>::zero()), ignore_chksig_(false), debug_enabled_(false) {
  }

@@ -57,7 +57,7 @@ public:
  };

  const block::Config& get_config() {
    return config_;
    return *config_;
  }

  ton::UnixTime get_unixtime() {

@@ -74,7 +74,7 @@ public:
  void set_lt(ton::LogicalTime lt);
  void set_rand_seed(td::BitArray<256>& rand_seed);
  void set_ignore_chksig(bool ignore_chksig);
  void set_config(block::Config &&config);
  void set_config(std::shared_ptr<block::Config> config);
  void set_libs(vm::Dictionary &&libs);
  void set_debug_enabled(bool debug_enabled);
  void set_prev_blocks_info(td::Ref<vm::Tuple> prev_blocks_info);

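With config_ stored as a std::shared_ptr<block::Config>, one decoded and unpacked configuration can back several emulator instances instead of being moved into a single TransactionEmulator. A minimal sketch, assuming the include paths below match the build setup and that decoded_config was produced elsewhere (for example by the decode_config helper shown earlier):

#include <memory>

#include "block/block.h"           // block::Config
#include "transaction-emulator.h"  // emulator::TransactionEmulator (path assumed)

void run_two_emulators(block::Config &&decoded_config) {
  // Decode/unpack once, then share.
  auto config = std::make_shared<block::Config>(std::move(decoded_config));

  emulator::TransactionEmulator first(config, /*vm_log_verbosity=*/0);
  emulator::TransactionEmulator second(config, /*vm_log_verbosity=*/1);

  // Replacing the config later only costs a shared_ptr copy.
  second.set_config(config);
}
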
@@ -24,12 +24,12 @@ public:
  }

  void set_c7(block::StdAddress address, uint32_t unixtime, uint64_t balance, td::BitArray<256> rand_seed, std::shared_ptr<const block::Config> config) {
    args_.set_address(address);
    args_.set_address(std::move(address));
    args_.set_now(unixtime);
    args_.set_balance(balance);
    args_.set_rand_seed(rand_seed);
    args_.set_rand_seed(std::move(rand_seed));
    if (config) {
      args_.set_config(config);
      args_.set_config(std::move(config));
    }
  }


@@ -37,6 +37,10 @@ public:
    args_.set_c7(std::move(c7));
  }

  void set_config(std::shared_ptr<const block::Config> config) {
    args_.set_config(std::move(config));
  }

  void set_prev_blocks_info(td::Ref<vm::Tuple> tuple) {
    args_.set_prev_blocks_info(std::move(tuple));
  }

@@ -46,7 +50,8 @@ public:
  }

  Answer run_get_method(int method_id, td::Ref<vm::Stack> stack) {
    return smc_.run_get_method(args_.set_stack(stack).set_method_id(method_id));
    ton::SmartContract::Args args = args_;
    return smc_.run_get_method(args.set_stack(stack).set_method_id(method_id));
  }

  Answer send_external_message(td::Ref<vm::Cell> message_body) {

@@ -54,7 +59,8 @@ public:
  }

  Answer send_internal_message(td::Ref<vm::Cell> message_body, uint64_t amount) {
    return smc_.send_internal_message(message_body, args_.set_amount(amount));
    ton::SmartContract::Args args = args_;
    return smc_.send_internal_message(message_body, args.set_amount(amount));
  }
};
}

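Copying args_ into a local Args object in run_get_method and send_internal_message keeps the per-call stack, method id and amount out of the stored defaults, so consecutive calls no longer see each other's parameters. A hedged usage sketch; the method-id formula follows the one used in the emulator tests above, and the include paths are assumptions:

#include "crypto/vm/stack.hpp"
#include "td/utils/crypto.h"
#include "tvm-emulator.hpp"  // emulator::TvmEmulator (path assumed)

void query_wallet(emulator::TvmEmulator &emu) {
  auto method_id = [](const char *name) {
    return static_cast<int>((td::crc16(td::Slice(name)) & 0xffff) | 0x10000);
  };

  // Each call works on a fresh copy of the stored Args, so the "seqno" stack
  // and method id set here do not bleed into the "get_public_key" call.
  auto seqno_res = emu.run_get_method(method_id("seqno"), td::make_ref<vm::Stack>());
  auto pubkey_res = emu.run_get_method(method_id("get_public_key"), td::make_ref<vm::Stack>());
  (void)seqno_res;
  (void)pubkey_res;
}
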
@ -949,8 +949,8 @@ bool TestNode::show_help(std::string command) {
|
|||
"lasttrans[dump] <account-id> <trans-lt> <trans-hash> [<count>]\tShows or dumps specified transaction and "
|
||||
"several preceding "
|
||||
"ones\n"
|
||||
"listblocktrans[rev] <block-id-ext> <count> [<start-account-id> <start-trans-lt>]\tLists block transactions, "
|
||||
"starting immediately after or before the specified one\n"
|
||||
"listblocktrans[rev][meta] <block-id-ext> <count> [<start-account-id> <start-trans-lt>]\tLists block "
|
||||
"transactions, starting immediately after or before the specified one\n"
|
||||
"blkproofchain[step] <from-block-id-ext> [<to-block-id-ext>]\tDownloads and checks proof of validity of the "
|
||||
"second "
|
||||
"indicated block (or the last known masterchain block) starting from given block\n"
|
||||
|
@ -1074,6 +1074,13 @@ bool TestNode::do_parse_line() {
|
|||
return parse_block_id_ext(blkid) && parse_uint32(count) &&
|
||||
(seekeoln() || (parse_hash(hash) && parse_lt(lt) && (mode |= 128) && seekeoln())) &&
|
||||
get_block_transactions(blkid, mode, count, hash, lt);
|
||||
} else if (word == "listblocktransmeta" || word == "listblocktransrevmeta") {
|
||||
lt = 0;
|
||||
int mode = (word == "listblocktransmeta" ? 7 : 0x47);
|
||||
mode |= 256;
|
||||
return parse_block_id_ext(blkid) && parse_uint32(count) &&
|
||||
(seekeoln() || (parse_hash(hash) && parse_lt(lt) && (mode |= 128) && seekeoln())) &&
|
||||
get_block_transactions(blkid, mode, count, hash, lt);
|
||||
} else if (word == "blkproofchain" || word == "blkproofchainstep") {
|
||||
ton::BlockIdExt blkid2{};
|
||||
return parse_block_id_ext(blkid) && (seekeoln() || parse_block_id_ext(blkid2)) && seekeoln() &&
|
||||
|
@ -2493,23 +2500,40 @@ bool TestNode::get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned
|
|||
} else {
|
||||
auto f = F.move_as_ok();
|
||||
std::vector<TransId> transactions;
|
||||
std::vector<ton::tl_object_ptr<ton::lite_api::liteServer_transactionMetadata>> metadata;
|
||||
for (auto& id : f->ids_) {
|
||||
transactions.emplace_back(id->account_, id->lt_, id->hash_);
|
||||
metadata.push_back(std::move(id->metadata_));
|
||||
}
|
||||
td::actor::send_closure_later(Self, &TestNode::got_block_transactions, ton::create_block_id(f->id_), mode,
|
||||
f->req_count_, f->incomplete_, std::move(transactions), std::move(f->proof_));
|
||||
f->req_count_, f->incomplete_, std::move(transactions), std::move(metadata),
|
||||
std::move(f->proof_));
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
void TestNode::got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete,
|
||||
std::vector<TestNode::TransId> trans, td::BufferSlice proof) {
|
||||
void TestNode::got_block_transactions(
|
||||
ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete, std::vector<TestNode::TransId> trans,
|
||||
std::vector<ton::tl_object_ptr<ton::lite_api::liteServer_transactionMetadata>> metadata, td::BufferSlice proof) {
|
||||
LOG(INFO) << "got up to " << req_count << " transactions from block " << blkid.to_str();
|
||||
auto out = td::TerminalIO::out();
|
||||
int count = 0;
|
||||
for (auto& t : trans) {
|
||||
for (size_t i = 0; i < trans.size(); ++i) {
|
||||
auto& t = trans[i];
|
||||
out << "transaction #" << ++count << ": account " << t.acc_addr.to_hex() << " lt " << t.trans_lt << " hash "
|
||||
<< t.trans_hash.to_hex() << std::endl;
|
||||
if (mode & 256) {
|
||||
auto& meta = metadata.at(i);
|
||||
if (meta == nullptr) {
|
||||
out << " metadata: <none>" << std::endl;
|
||||
} else {
|
||||
out << " metadata: "
|
||||
<< block::MsgMetadata{(td::uint32)meta->depth_, meta->initiator_->workchain_, meta->initiator_->id_,
|
||||
(ton::LogicalTime)meta->initiator_lt_}
|
||||
.to_str()
|
||||
<< std::endl;
|
||||
}
|
||||
}
|
||||
}
|
||||
out << (incomplete ? "(block transaction list incomplete)" : "(end of block transaction list)") << std::endl;
|
||||
}
|
||||
|
|
|
@ -258,7 +258,9 @@ class TestNode : public td::actor::Actor {
|
|||
bool get_block_transactions(ton::BlockIdExt blkid, int mode, unsigned count, ton::Bits256 acc_addr,
|
||||
ton::LogicalTime lt);
|
||||
void got_block_transactions(ton::BlockIdExt blkid, int mode, unsigned req_count, bool incomplete,
|
||||
std::vector<TransId> trans, td::BufferSlice proof);
|
||||
std::vector<TransId> trans,
|
||||
std::vector<ton::tl_object_ptr<ton::lite_api::liteServer_transactionMetadata>> metadata,
|
||||
td::BufferSlice proof);
|
||||
bool get_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode);
|
||||
void got_block_proof(ton::BlockIdExt from, ton::BlockIdExt to, int mode, td::BufferSlice res);
|
||||
bool get_creator_stats(ton::BlockIdExt blkid, int mode, unsigned req_count, ton::Bits256 start_after,
|
||||
|
|
|
@@ -78,7 +78,6 @@ td::Status OverlayFecBroadcastPart::check_signature() {
}
td::Status OverlayFecBroadcastPart::run_checks() {
TRY_STATUS(check_time());
TRY_STATUS(check_duplicate());
TRY_STATUS(check_source());

@@ -94,14 +93,17 @@ void BroadcastFec::broadcast_checked(td::Result<td::Unit> R) {
overlay_->deliver_broadcast(get_source().compute_short_id(), data_.clone());
auto manager = overlay_->overlay_manager();
while (!parts_.empty()) {
distribute_part(parts_.begin()->first);
distribute_part(parts_.begin()->first);
}
is_checked_ = true;
}
// Do we need status here??
td::Status BroadcastFec::distribute_part(td::uint32 seqno) {
td::Status BroadcastFec::distribute_part(td::uint32 seqno) {
auto i = parts_.find(seqno);
if (i == parts_.end()) {
VLOG(OVERLAY_WARNING) << "not distibuting empty part " << seqno;
// should not get here
return td::Status::OK();
}

@@ -132,7 +134,6 @@ td::Status BroadcastFec::distribute_part(td::uint32 seqno) {
}
td::Status OverlayFecBroadcastPart::apply() {
if (!bcast_) {
bcast_ = overlay_->get_fec_broadcast(broadcast_hash_);
}

@@ -165,16 +166,20 @@ td::Status OverlayFecBroadcastPart::apply() {
return S;
}
} else {
if(untrusted_) {
if (untrusted_) {
auto P = td::PromiseCreator::lambda(
[id = broadcast_hash_, overlay_id = actor_id(overlay_)](td::Result<td::Unit> RR) mutable {
td::actor::send_closure(std::move(overlay_id), &OverlayImpl::broadcast_checked, id, std::move(RR));
});
[id = broadcast_hash_, overlay_id = actor_id(overlay_)](td::Result<td::Unit> RR) mutable {
td::actor::send_closure(std::move(overlay_id), &OverlayImpl::broadcast_checked, id, std::move(RR));
});
overlay_->check_broadcast(bcast_->get_source().compute_short_id(), R.move_as_ok(), std::move(P));
} else {
overlay_->deliver_broadcast(bcast_->get_source().compute_short_id(), R.move_as_ok());
}
}
} else {
bcast_->set_overlay(overlay_);
bcast_->set_src_peer_id(src_peer_id_);
TRY_STATUS(bcast_->add_part(seqno_, data_.clone(), export_serialized_short(), export_serialized()));
}
return td::Status::OK();
}

@@ -304,7 +309,8 @@ td::Status OverlayFecBroadcastPart::create_new(OverlayImpl *overlay, td::actor::
auto B = std::make_unique<OverlayFecBroadcastPart>(
broadcast_hash, part_hash, PublicKey{}, overlay->get_certificate(local_id), data_hash, size, flags,
part_data_hash, std::move(part), seqno, std::move(fec_type), date, td::BufferSlice{}, false, nullptr, overlay, adnl::AdnlNodeIdShort::zero());
part_data_hash, std::move(part), seqno, std::move(fec_type), date, td::BufferSlice{}, false, nullptr, overlay,
adnl::AdnlNodeIdShort::zero());
auto to_sign = B->to_sign();
auto P = td::PromiseCreator::lambda(
@@ -82,15 +82,15 @@ class BroadcastFec : public td::ListNode {
}
}
td::Status add_part(td::uint32 seqno, td::BufferSlice data,
td::BufferSlice serialized_fec_part_short,
td::Status add_part(td::uint32 seqno, td::BufferSlice data, td::BufferSlice serialized_fec_part_short,
td::BufferSlice serialized_fec_part) {
CHECK(decoder_);
td::fec::Symbol s;
s.id = seqno;
s.data = std::move(data);
if (decoder_) {
td::fec::Symbol s;
s.id = seqno;
s.data = std::move(data);
decoder_->add_symbol(std::move(s));
decoder_->add_symbol(std::move(s));
}
parts_[seqno] = std::pair<td::BufferSlice, td::BufferSlice>(std::move(serialized_fec_part_short),
std::move(serialized_fec_part));

@@ -200,8 +200,13 @@ class BroadcastFec : public td::ListNode {
td::Status distribute_part(td::uint32 seqno);
bool is_checked() const {
return is_checked_;
}
private:
bool ready_ = false;
bool is_checked_ = false;
Overlay::BroadcastHash hash_;
Overlay::BroadcastDataHash data_hash_;

@@ -281,7 +286,7 @@ class OverlayFecBroadcastPart : public td::ListNode {
, signature_(std::move(signature))
, is_short_(is_short)
, bcast_(bcast)
, overlay_(overlay)
, overlay_(overlay)
, src_peer_id_(src_peer_id) {
}

@@ -300,7 +305,7 @@ class OverlayFecBroadcastPart : public td::ListNode {
signature_ = std::move(signature);
}
void update_overlay(OverlayImpl *overlay);
tl_object_ptr<ton_api::overlay_broadcastFec> export_tl();
tl_object_ptr<ton_api::overlay_broadcastFecShort> export_tl_short();
td::BufferSlice export_serialized();

@@ -310,14 +315,16 @@ class OverlayFecBroadcastPart : public td::ListNode {
td::Status run() {
TRY_STATUS(run_checks());
TRY_STATUS(apply());
if(!untrusted_) {
if (!untrusted_ || bcast_->is_checked()) {
TRY_STATUS(distribute());
}
return td::Status::OK();
}
static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, tl_object_ptr<ton_api::overlay_broadcastFec> broadcast);
static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id, tl_object_ptr<ton_api::overlay_broadcastFecShort> broadcast);
static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id,
tl_object_ptr<ton_api::overlay_broadcastFec> broadcast);
static td::Status create(OverlayImpl *overlay, adnl::AdnlNodeIdShort src_peer_id,
tl_object_ptr<ton_api::overlay_broadcastFecShort> broadcast);
static td::Status create_new(OverlayImpl *overlay, td::actor::ActorId<OverlayImpl> overlay_actor_id,
PublicKeyHash local_id, Overlay::BroadcastDataHash data_hash, td::uint32 size,
td::uint32 flags, td::BufferSlice part, td::uint32 seqno, fec::FecType fec_type,
@@ -1,11 +1,16 @@
## 2024.04 Update
## 2024.08 Update

1. Introduction of dispatch queues, message envelopes with transaction chain metadata, and explicitly stored msg_queue size, which will be activated by `Config8.version >= 8` and new `Config8.capabilities` bits: `capStoreOutMsgQueueSize`, `capMsgMetadata`, `capDeferMessages`.
2. A number of changes to the transaction executor which will be activated for `Config8.version >= 8`:
   - Check mode on invalid `action_send_msg`. Ignore action if `IGNORE_ERROR` (+2) bit is set, bounce if `BOUNCE_ON_FAIL` (+16) bit is set.
   - Slightly change random seed generation to fix mix of `addr_rewrite` and `addr`.
   - Fill in `skipped_actions` for both invalid and valid messages with `IGNORE_ERROR` mode that can't be sent.
   - Allow unfreeze through external messages.
   - Don't use user-provided `fwd_fee` and `ihr_fee` for internal messages.
3. A few issues with broadcasts were fixed: stop on receiving the last piece, respond to AdnlMessageCreateChannel.
4. A number of fixes and improvements for emulator and tonlib: correct work with config_addr, not accepted externals, bounces, debug ops gas consumption, added version and c5 dump, fixed tonlib crashes.
5. Added new flags and commands to the node, in particular `--fast-state-serializer`, `getcollatoroptionsjson`, `setcollatoroptionsjson`.

Besides the work of the core team, this update is based on the efforts of @krigga (emulator), the stonfi team, in particular @dbaranovstonfi and @hey-researcher (emulator), and @loeul, @xiaoxianBoy, @simlecode (typos in comments and docs).

1. Make Jemalloc default allocator
2. Add candidate broadcasting and caching
3. Limit per address speed for external messages broadcast by reasonably large number
4. Overlay improvements: fix dropping peers in small custom overlays, fix wrong certificate on missed keyblocks
5. Extended statistics and logs for celldb usage, session stats, persistent state serialization
6. Tonlib and explorer fixes
7. Flags for precise control of Celldb: `--celldb-cache-size`, `--celldb-direct-io` and `--celldb-preload-all`
8. Add validator-console command to stop persistent state serialization
9. Use `@` path separator for defining include paths in fift and create-state utilities on Windows only.
@@ -18,6 +18,7 @@
*/
#pragma once
#include "td/utils/Status.h"
#include "td/utils/Time.h"
#include "td/utils/logging.h"
#include <functional>
namespace td {
@@ -258,11 +258,17 @@ Status RocksDb::flush() {
Status RocksDb::begin_snapshot() {
snapshot_.reset(db_->GetSnapshot());
if (options_.snapshot_statistics) {
options_.snapshot_statistics->begin_snapshot(snapshot_.get());
}
return td::Status::OK();
}
Status RocksDb::end_snapshot() {
if (snapshot_) {
if (options_.snapshot_statistics) {
options_.snapshot_statistics->end_snapshot(snapshot_.get());
}
db_->ReleaseSnapshot(snapshot_.release());
}
return td::Status::OK();

@@ -271,4 +277,42 @@ Status RocksDb::end_snapshot() {
RocksDb::RocksDb(std::shared_ptr<rocksdb::OptimisticTransactionDB> db, RocksDbOptions options)
: db_(std::move(db)), options_(options) {
}
void RocksDbSnapshotStatistics::begin_snapshot(const rocksdb::Snapshot *snapshot) {
auto lock = std::unique_lock<std::mutex>(mutex_);
auto id = reinterpret_cast<std::uintptr_t>(snapshot);
auto ts = td::Timestamp::now().at();
CHECK(id_to_ts_.emplace(id, ts).second);
CHECK(by_ts_.emplace(ts, id).second);
}
void RocksDbSnapshotStatistics::end_snapshot(const rocksdb::Snapshot *snapshot) {
auto lock = std::unique_lock<std::mutex>(mutex_);
auto id = reinterpret_cast<std::uintptr_t>(snapshot);
auto it = id_to_ts_.find(id);
CHECK(it != id_to_ts_.end());
auto ts = it->second;
CHECK(by_ts_.erase(std::make_pair(ts, id)) == 1u);
CHECK(id_to_ts_.erase(id) == 1u);
}
td::Timestamp RocksDbSnapshotStatistics::oldest_snapshot_timestamp() const {
auto lock = std::unique_lock<std::mutex>(mutex_);
if (by_ts_.empty()) {
return {};
}
return td::Timestamp::at(by_ts_.begin()->first);
}
std::string RocksDbSnapshotStatistics::to_string() const {
td::Timestamp oldest_snapshot = oldest_snapshot_timestamp();
double value;
if (oldest_snapshot) {
value = td::Timestamp::now().at() - oldest_snapshot.at();
} else {
value = -1;
}
return PSTRING() << "td.rocksdb.snapshot.oldest_snapshot_ago.seconds : " << value << "\n";
}
} // namespace td
@@ -26,6 +26,12 @@
#include "td/utils/Status.h"
#include "td/utils/optional.h"
#include "td/utils/Time.h"
#include <map>
#include <mutex>
#include <set>
namespace rocksdb {
class Cache;
class OptimisticTransactionDB;

@@ -36,10 +42,22 @@ class Statistics;
} // namespace rocksdb
namespace td {
struct RocksDbSnapshotStatistics {
void begin_snapshot(const rocksdb::Snapshot *snapshot);
void end_snapshot(const rocksdb::Snapshot *snapshot);
td::Timestamp oldest_snapshot_timestamp() const;
std::string to_string() const;
private:
mutable std::mutex mutex_;
std::map<std::uintptr_t, double> id_to_ts_;
std::set<std::pair<double, std::uintptr_t>> by_ts_;
};
struct RocksDbOptions {
std::shared_ptr<rocksdb::Statistics> statistics = nullptr;
std::shared_ptr<rocksdb::Cache> block_cache;  // Default - one 1G cache for all RocksDb
std::shared_ptr<RocksDbSnapshotStatistics> snapshot_statistics = nullptr;
bool use_direct_reads = false;
};
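For illustration (a sketch outside the diff, with an assumed database path and logging call): the new `snapshot_statistics` field in `RocksDbOptions` can be wired up the way the `KeyValue` test further below does, and then polled for the age of the oldest live snapshot.

```cpp
#include "td/db/RocksDb.h"

// Sketch only: open a RocksDb with snapshot statistics enabled and report them.
td::Status open_db_with_snapshot_stats() {
  td::RocksDbOptions options;
  options.snapshot_statistics = std::make_shared<td::RocksDbSnapshotStatistics>();
  TRY_RESULT(db, td::RocksDb::open("/tmp/example-db", options));  // path is a placeholder
  // While any snapshot is alive, oldest_snapshot_timestamp() is set and to_string()
  // reports "td.rocksdb.snapshot.oldest_snapshot_ago.seconds : <age>".
  LOG(INFO) << options.snapshot_statistics->to_string();
  return td::Status::OK();
}
```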
@@ -27,7 +27,7 @@ namespace td {
// Generic stream interface
// Will to hide implementations details.
// CyclicBuffer, ChainBuffer, Bounded ChainBuffer, some clever writers. They all should be interchangable
// Most implementaions will assume that reading and writing may happen concurrently
// Most implementations will assume that reading and writing may happen concurrently
class StreamReaderInterface {
public:

@@ -73,7 +73,7 @@ Result<bool> StreamToFileActor::do_loop() {
// Also it could be useful to check error and stop immediately.
TRY_RESULT(is_closed, is_closed());
// Flush all data that is awailable on the at the beginning of loop
// Flush all data that is available on the at the beginning of loop
TRY_STATUS(do_flush_once());
if ((sync_at_ && sync_at_.is_in_past()) || is_closed) {
@@ -60,9 +60,20 @@ TEST(KeyValue, simple) {
ensure_value(as_slice(x), as_slice(x));
kv.reset();
kv = std::make_unique<td::RocksDb>(td::RocksDb::open(db_name.str()).move_as_ok());
td::RocksDbOptions options;
options.snapshot_statistics = std::make_shared<td::RocksDbSnapshotStatistics>();
kv = std::make_unique<td::RocksDb>(td::RocksDb::open(db_name.str(), options).move_as_ok());
ensure_value("A", "HELLO");
ensure_value(as_slice(x), as_slice(x));
CHECK(!options.snapshot_statistics->oldest_snapshot_timestamp());
auto snapshot = kv->snapshot();
CHECK(options.snapshot_statistics->oldest_snapshot_timestamp());
auto snapshot2 = kv->snapshot();
snapshot.reset();
CHECK(options.snapshot_statistics->oldest_snapshot_timestamp());
snapshot2.reset();
CHECK(!options.snapshot_statistics->oldest_snapshot_timestamp());
};
TEST(KeyValue, async_simple) {
@@ -68,9 +68,14 @@ Result<T> read_file_impl(CSlice path, int64 size, int64 offset) {
return Status::Error("Failed to read file: invalid size");
}
auto content = create_empty<T>(narrow_cast<size_t>(size));
TRY_RESULT(got_size, from_file.pread(as_mutable_slice(content), offset));
if (got_size != static_cast<size_t>(size)) {
return Status::Error("Failed to read file");
MutableSlice slice = as_mutable_slice(content);
while (!slice.empty()) {
TRY_RESULT(got_size, from_file.pread(slice, offset));
if (got_size == 0) {
return Status::Error("Failed to read file");
}
offset += got_size;
slice.remove_prefix(got_size);
}
from_file.close();
return std::move(content);

@@ -103,9 +108,15 @@ Status write_file(CSlice to, Slice data, WriteFileOptions options) {
TRY_STATUS(to_file.lock(FileFd::LockFlags::Write, to.str(), 10));
TRY_STATUS(to_file.truncate_to_current_position(0));
}
TRY_RESULT(written, to_file.write(data));
if (written != size) {
return Status::Error(PSLICE() << "Failed to write file: written " << written << " bytes instead of " << size);
size_t total_written = 0;
while (!data.empty()) {
TRY_RESULT(written, to_file.write(data));
if (written == 0) {
return Status::Error(PSLICE() << "Failed to write file: written " << total_written << " bytes instead of "
<< size);
}
total_written += written;
data.remove_prefix(written);
}
if (options.need_sync) {
TRY_STATUS(to_file.sync());
@@ -66,6 +66,12 @@ class optional {
DCHECK(*this);
return impl_.ok_ref();
}
T &value_force() {
if (!*this) {
*this = T();
}
return value();
}
T &operator*() {
return value();
}

@@ -88,6 +94,14 @@ class optional {
impl_.emplace(std::forward<ArgsT>(args)...);
}
bool operator==(const optional& other) const {
return (bool)*this == (bool)other && (!(bool)*this || value() == other.value());
}
bool operator!=(const optional& other) const {
return !(*this == other);
}
private:
Result<T> impl_;
};
@@ -413,4 +413,52 @@ Result<CpuStat> cpu_stat() {
#endif
}
Result<uint64> get_total_ram() {
#if TD_LINUX
TRY_RESULT(fd, FileFd::open("/proc/meminfo", FileFd::Read));
SCOPE_EXIT {
fd.close();
};
constexpr int TMEM_SIZE = 10000;
char mem[TMEM_SIZE];
TRY_RESULT(size, fd.read(MutableSlice(mem, TMEM_SIZE - 1)));
if (size >= TMEM_SIZE - 1) {
return Status::Error("Failed for read /proc/meminfo");
}
mem[size] = 0;
const char* s = mem;
while (*s) {
const char *name_begin = s;
while (*s != 0 && *s != '\n') {
s++;
}
auto name_end = name_begin;
while (is_alpha(*name_end)) {
name_end++;
}
Slice name(name_begin, name_end);
if (name == "MemTotal") {
Slice value(name_end, s);
if (!value.empty() && value[0] == ':') {
value.remove_prefix(1);
}
value = trim(value);
value = split(value).first;
TRY_RESULT_PREFIX(mem, to_integer_safe<uint64>(value), "Invalid value of MemTotal");
if (mem >= 1ULL << (64 - 10)) {
return Status::Error("Invalid value of MemTotal");
}
return mem * 1024;
}
if (*s == 0) {
break;
}
s++;
}
return Status::Error("No MemTotal in /proc/meminfo");
#else
return Status::Error("Not supported");
#endif
}
} // namespace td
@@ -64,4 +64,6 @@ Status update_atime(CSlice path) TD_WARN_UNUSED_RESULT;
#endif
Result<uint64> get_total_ram() TD_WARN_UNUSED_RESULT;
} // namespace td
@@ -159,6 +159,7 @@ std::string lite_query_name_by_id(int id) {
{lite_api::liteServer_getLibrariesWithProof::ID, "getLibrariesWithProof"},
{lite_api::liteServer_getShardBlockProof::ID, "getShardBlockProof"},
{lite_api::liteServer_getOutMsgQueueSizes::ID, "getOutMsgQueueSizes"},
{lite_api::liteServer_getBlockOutMsgQueueSize::ID, "getBlockOutMsgQueueSize"},
{lite_api::liteServer_nonfinal_getCandidate::ID, "nonfinal.getCandidate"},
{lite_api::liteServer_nonfinal_getValidatorGroups::ID, "nonfinal.getValidatorGroups"}};
auto it = names.find(id);
@@ -41,7 +41,8 @@ liteServer.shardInfo id:tonNode.blockIdExt shardblk:tonNode.blockIdExt shard_pro
liteServer.allShardsInfo id:tonNode.blockIdExt proof:bytes data:bytes = liteServer.AllShardsInfo;
liteServer.transactionInfo id:tonNode.blockIdExt proof:bytes transaction:bytes = liteServer.TransactionInfo;
liteServer.transactionList ids:(vector tonNode.blockIdExt) transactions:bytes = liteServer.TransactionList;
liteServer.transactionId mode:# account:mode.0?int256 lt:mode.1?long hash:mode.2?int256 = liteServer.TransactionId;
liteServer.transactionMetadata mode:# depth:int initiator:liteServer.accountId initiator_lt:long = liteServer.TransactionMetadata;
liteServer.transactionId#b12f65af mode:# account:mode.0?int256 lt:mode.1?long hash:mode.2?int256 metadata:mode.8?liteServer.transactionMetadata = liteServer.TransactionId;
liteServer.transactionId3 account:int256 lt:long = liteServer.TransactionId3;
liteServer.blockTransactions id:tonNode.blockIdExt req_count:# incomplete:Bool ids:(vector liteServer.transactionId) proof:bytes = liteServer.BlockTransactions;
liteServer.blockTransactionsExt id:tonNode.blockIdExt req_count:# incomplete:Bool transactions:bytes proof:bytes = liteServer.BlockTransactionsExt;

@@ -59,6 +60,7 @@ liteServer.shardBlockProof masterchain_id:tonNode.blockIdExt links:(vector liteS
liteServer.lookupBlockResult id:tonNode.blockIdExt mode:# mc_block_id:tonNode.blockIdExt client_mc_state_proof:bytes mc_block_proof:bytes shard_links:(vector liteServer.shardBlockLink) header:bytes prev_header:bytes = liteServer.LookupBlockResult;
liteServer.outMsgQueueSize id:tonNode.blockIdExt size:int = liteServer.OutMsgQueueSize;
liteServer.outMsgQueueSizes shards:(vector liteServer.outMsgQueueSize) ext_msg_queue_size_limit:int = liteServer.OutMsgQueueSizes;
liteServer.blockOutMsgQueueSize mode:# id:tonNode.blockIdExt size:long proof:mode.0?bytes = liteServer.BlockOutMsgQueueSize;
liteServer.debug.verbosity value:int = liteServer.debug.Verbosity;

@@ -97,6 +99,7 @@ liteServer.getLibraries library_list:(vector int256) = liteServer.LibraryResult;
liteServer.getLibrariesWithProof id:tonNode.blockIdExt mode:# library_list:(vector int256) = liteServer.LibraryResultWithProof;
liteServer.getShardBlockProof id:tonNode.blockIdExt = liteServer.ShardBlockProof;
liteServer.getOutMsgQueueSizes mode:# wc:mode.0?int shard:mode.0?long = liteServer.OutMsgQueueSizes;
liteServer.getBlockOutMsgQueueSize mode:# id:tonNode.blockIdExt want_proof:mode.0?true = liteServer.BlockOutMsgQueueSize;
liteServer.nonfinal.getValidatorGroups mode:# wc:mode.0?int shard:mode.0?long = liteServer.nonfinal.ValidatorGroups;
liteServer.nonfinal.getCandidate id:liteServer.nonfinal.candidateId = liteServer.nonfinal.Candidate;
Binary file not shown.

@@ -606,6 +606,11 @@ engine.validator.customOverlayNode adnl_id:int256 msg_sender:Bool msg_sender_pri
engine.validator.customOverlay name:string nodes:(vector engine.validator.customOverlayNode) = engine.validator.CustomOverlay;
engine.validator.customOverlaysConfig overlays:(vector engine.validator.customOverlay) = engine.validator.CustomOverlaysConfig;
engine.validator.collatorOptions
deferring_enabled:Bool defer_messages_after:int defer_out_queue_size_limit:long
dispatch_phase_2_max_total:int dispatch_phase_3_max_total:int
dispatch_phase_2_max_per_initiator:int dispatch_phase_3_max_per_initiator:int = engine.validator.CollatorOptions;
---functions---
---types---

@@ -653,7 +658,7 @@ engine.validator.onePerfTimerStat time:int min:double avg:double max:double = en
engine.validator.perfTimerStatsByName name:string stats:(vector engine.validator.OnePerfTimerStat) = engine.validator.PerfTimerStatsByName;
engine.validator.perfTimerStats stats:(vector engine.validator.PerfTimerStatsByName) = engine.validator.PerfTimerStats;
engine.validator.shardOutQueueSize size:int = engine.validator.ShardOutQueueSize;
engine.validator.shardOutQueueSize size:long = engine.validator.ShardOutQueueSize;
---functions---

@@ -715,6 +720,9 @@ engine.validator.showCustomOverlays = engine.validator.CustomOverlaysConfig;
engine.validator.setStateSerializerEnabled enabled:Bool = engine.validator.Success;
engine.validator.setCollatorOptionsJson json:string = engine.validator.Success;
engine.validator.getCollatorOptionsJson = engine.validator.JsonConfig;
---types---
storage.pong = storage.Pong;
Binary file not shown.

@@ -230,6 +230,8 @@ blocks.blockSignatures id:ton.blockIdExt signatures:(vector blocks.signature) =
blocks.shardBlockLink id:ton.blockIdExt proof:bytes = blocks.ShardBlockLink;
blocks.blockLinkBack to_key_block:Bool from:ton.blockIdExt to:ton.blockIdExt dest_proof:bytes proof:bytes state_proof:bytes = blocks.BlockLinkBack;
blocks.shardBlockProof from:ton.blockIdExt mc_id:ton.blockIdExt links:(vector blocks.shardBlockLink) mc_proof:(vector blocks.blockLinkBack) = blocks.ShardBlockProof;
blocks.outMsgQueueSize id:ton.blockIdExt size:int32 = blocks.OutMsgQueueSize;
blocks.outMsgQueueSizes shards:(vector blocks.outMsgQueueSize) ext_msg_queue_size_limit:int32 = blocks.OutMsgQueueSizes;
configInfo config:tvm.cell = ConfigInfo;

@@ -309,6 +311,7 @@ smc.forget id:int53 = Ok;
smc.getCode id:int53 = tvm.Cell;
smc.getData id:int53 = tvm.Cell;
smc.getState id:int53 = tvm.Cell;
smc.getRawFullAccountState id:int53 = raw.FullAccountState;
smc.runGetMethod id:int53 method:smc.MethodId stack:vector<tvm.StackEntry> = smc.RunResult;
smc.getLibraries library_list:(vector int256) = smc.LibraryResult;

@@ -331,6 +334,7 @@ blocks.getTransactionsExt id:ton.blockIdExt mode:# count:# after:blocks.accountT
blocks.getBlockHeader id:ton.blockIdExt = blocks.Header;
blocks.getMasterchainBlockSignatures seqno:int32 = blocks.BlockSignatures;
blocks.getShardBlockProof id:ton.blockIdExt mode:# from:mode.0?ton.blockIdExt = blocks.ShardBlockProof;
blocks.getOutMsgQueueSizes mode:# wc:mode.0?int32 shard:mode.0?int64 = blocks.OutMsgQueueSizes;
onLiteServerQueryResult id:int64 bytes:bytes = Ok;
onLiteServerQueryError id:int64 error:error = Ok;
Binary file not shown.

@@ -57,7 +57,10 @@ enum GlobalCapabilities {
capBounceMsgBody = 4,
capReportVersion = 8,
capSplitMergeTransactions = 16,
capShortDequeue = 32
capShortDequeue = 32,
capStoreOutMsgQueueSize = 64,
capMsgMetadata = 128,
capDeferMessages = 256
};
inline int shard_pfx_len(ShardId shard) {
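For illustration (a sketch, not from the diff): the new entries are plain bit flags in `GlobalCapabilities`, so code that has read the `Config8.capabilities` value can test for them with bitwise checks; the `capabilities` argument and the header path below are assumptions.

```cpp
#include "ton/ton-types.h"  // header path assumed; defines GlobalCapabilities

// Sketch only: test the new capability bits against a Config8.capabilities value.
inline bool supports_msg_metadata(long long capabilities) {
  return (capabilities & ton::capMsgMetadata) != 0;
}
inline bool supports_deferred_messages(long long capabilities) {
  return (capabilities & ton::capDeferMessages) != 0;
}
```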
@@ -1971,7 +1971,7 @@ class RunEmulator : public TonlibQueryActor {
check(r_config.move_as_error());
return;
}
std::unique_ptr<block::ConfigInfo> config = r_config.move_as_ok();
std::shared_ptr<block::ConfigInfo> config = r_config.move_as_ok();
auto r_shard_account = account_state_->to_shardAccountCellSlice();
if (r_shard_account.is_error()) {

@@ -1995,7 +1995,7 @@ class RunEmulator : public TonlibQueryActor {
return;
}
vm::Dictionary libraries = global_libraries_;
emulator::TransactionEmulator trans_emulator(std::move(*config));
emulator::TransactionEmulator trans_emulator(config);
trans_emulator.set_prev_blocks_info(prev_blocks_info.move_as_ok());
trans_emulator.set_libs(std::move(libraries));
trans_emulator.set_rand_seed(block_id_.rand_seed);

@@ -4360,6 +4360,17 @@ td::Status TonlibClient::do_request(const tonlib_api::smc_getState& request,
return td::Status::OK();
}
td::Status TonlibClient::do_request(const tonlib_api::smc_getRawFullAccountState& request,
td::Promise<object_ptr<tonlib_api::raw_fullAccountState>>&& promise) {
auto it = smcs_.find(request.id_);
if (it == smcs_.end()) {
return TonlibError::InvalidSmcId();
}
auto& acc = it->second;
promise.set_result(acc->to_raw_fullAccountState());
return td::Status::OK();
}
bool is_list(vm::StackEntry entry) {
while (true) {
if (entry.type() == vm::StackEntry::Type::t_null) {

@@ -5563,6 +5574,11 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_lookupBlock& reques
client_.with_last_block(
[self = this, blkid, lite_block = std::move(lite_block), mode = request.mode_, lt = (td::uint64)request.lt_,
utime = (td::uint32)request.utime_, promise = std::move(promise)](td::Result<LastBlockState> r_last_block) mutable {
if (r_last_block.is_error()) {
promise.set_error(r_last_block.move_as_error_prefix(TonlibError::Internal("get last block failed ")));
return;
}
self->client_.send_query(ton::lite_api::liteServer_lookupBlockWithProof(mode, std::move(lite_block), ton::create_tl_lite_block_id(r_last_block.ok().last_block_id), lt, utime),
promise.wrap([blkid, mode, utime, lt, last_block = r_last_block.ok().last_block_id](lite_api_ptr<ton::lite_api::liteServer_lookupBlockResult>&& result)
-> td::Result<object_ptr<tonlib_api::ton_blockIdExt>> {

@@ -6031,6 +6047,24 @@ td::Status TonlibClient::do_request(const tonlib_api::blocks_getShardBlockProof&
return td::Status::OK();
}
td::Status TonlibClient::do_request(const tonlib_api::blocks_getOutMsgQueueSizes& request,
td::Promise<object_ptr<tonlib_api::blocks_outMsgQueueSizes>>&& promise) {
client_.send_query(ton::lite_api::liteServer_getOutMsgQueueSizes(request.mode_, request.wc_, request.shard_),
promise.wrap([](lite_api_ptr<ton::lite_api::liteServer_outMsgQueueSizes>&& queue_sizes) {
tonlib_api::blocks_outMsgQueueSizes result;
result.ext_msg_queue_size_limit_ = queue_sizes->ext_msg_queue_size_limit_;
for (auto &x : queue_sizes->shards_) {
tonlib_api::blocks_outMsgQueueSize shard;
shard.id_ = to_tonlib_api(*x->id_);
shard.size_ = x->size_;
result.shards_.push_back(tonlib_api::make_object<tonlib_api::blocks_outMsgQueueSize>(std::move(shard)));
}
return tonlib_api::make_object<tonlib_api::blocks_outMsgQueueSizes>(std::move(result));
}));
return td::Status::OK();
}
void TonlibClient::load_libs_from_disk() {
LOG(DEBUG) << "loading libraries from disk cache";
auto r_data = kv_->get("tonlib.libcache");
@@ -324,6 +324,8 @@ class TonlibClient : public td::actor::Actor {
td::Promise<object_ptr<tonlib_api::tvm_cell>>&& promise);
td::Status do_request(const tonlib_api::smc_getState& request,
td::Promise<object_ptr<tonlib_api::tvm_cell>>&& promise);
td::Status do_request(const tonlib_api::smc_getRawFullAccountState& request,
td::Promise<object_ptr<tonlib_api::raw_fullAccountState>>&& promise);
td::Status do_request(const tonlib_api::smc_runGetMethod& request,
td::Promise<object_ptr<tonlib_api::smc_runResult>>&& promise);

@@ -390,6 +392,8 @@ class TonlibClient : public td::actor::Actor {
td::Promise<object_ptr<tonlib_api::blocks_blockSignatures>>&& promise);
td::Status do_request(const tonlib_api::blocks_getShardBlockProof& request,
td::Promise<object_ptr<tonlib_api::blocks_shardBlockProof>>&& promise);
td::Status do_request(const tonlib_api::blocks_getOutMsgQueueSizes& request,
td::Promise<object_ptr<tonlib_api::blocks_outMsgQueueSizes>>&& promise);
void get_config_param(int32_t param, int32_t mode, ton::BlockIdExt block,
td::Promise<object_ptr<tonlib_api::configInfo>>&& promise);
@@ -1361,10 +1361,10 @@ class TonlibCli : public td::actor::Actor {
}
if (l >= 3 && (str[0] == 'x' || str[0] == 'b') && str[1] == '{' && str.back() == '}') {
unsigned char buff[128];
int bits =
(str[0] == 'x')
? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1)
: (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff), str.begin() + 2, str.end() - 1);
int bits = (str[0] == 'x') ? (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), str.begin() + 2,
str.end() - 1)
: (int)td::bitstring::parse_bitstring_binary_literal(buff, sizeof(buff) * 8,
str.begin() + 2, str.end() - 1);
if (bits < 0) {
return td::Status::Error("Failed to parse slice");
}
@@ -1203,3 +1203,63 @@ td::Status SetStateSerializerEnabledQuery::receive(td::BufferSlice data) {
td::TerminalIO::out() << "success\n";
return td::Status::OK();
}
td::Status SetCollatorOptionsJsonQuery::run() {
TRY_RESULT_ASSIGN(file_name_, tokenizer_.get_token<std::string>());
TRY_STATUS(tokenizer_.check_endl());
return td::Status::OK();
}
td::Status SetCollatorOptionsJsonQuery::send() {
TRY_RESULT(data, td::read_file(file_name_));
auto b =
ton::create_serialize_tl_object<ton::ton_api::engine_validator_setCollatorOptionsJson>(data.as_slice().str());
td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise());
return td::Status::OK();
}
td::Status SetCollatorOptionsJsonQuery::receive(td::BufferSlice data) {
TRY_RESULT_PREFIX(f, ton::fetch_tl_object<ton::ton_api::engine_validator_success>(data.as_slice(), true),
"received incorrect answer: ");
td::TerminalIO::out() << "success\n";
return td::Status::OK();
}
td::Status ResetCollatorOptionsQuery::run() {
TRY_STATUS(tokenizer_.check_endl());
return td::Status::OK();
}
td::Status ResetCollatorOptionsQuery::send() {
auto b = ton::create_serialize_tl_object<ton::ton_api::engine_validator_setCollatorOptionsJson>("{}");
td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise());
return td::Status::OK();
}
td::Status ResetCollatorOptionsQuery::receive(td::BufferSlice data) {
TRY_RESULT_PREFIX(f, ton::fetch_tl_object<ton::ton_api::engine_validator_success>(data.as_slice(), true),
"received incorrect answer: ");
td::TerminalIO::out() << "success\n";
return td::Status::OK();
}
td::Status GetCollatorOptionsJsonQuery::run() {
TRY_RESULT_ASSIGN(file_name_, tokenizer_.get_token<std::string>());
TRY_STATUS(tokenizer_.check_endl());
return td::Status::OK();
}
td::Status GetCollatorOptionsJsonQuery::send() {
auto b =
ton::create_serialize_tl_object<ton::ton_api::engine_validator_getCollatorOptionsJson>();
td::actor::send_closure(console_, &ValidatorEngineConsole::envelope_send_query, std::move(b), create_promise());
return td::Status::OK();
}
td::Status GetCollatorOptionsJsonQuery::receive(td::BufferSlice data) {
TRY_RESULT_PREFIX(f, ton::fetch_tl_object<ton::ton_api::engine_validator_jsonConfig>(data.as_slice(), true),
"received incorrect answer: ");
TRY_STATUS(td::write_file(file_name_, f->data_));
td::TerminalIO::out() << "saved config to " << file_name_ << "\n";
return td::Status::OK();
}
@@ -1229,3 +1229,66 @@ class SetStateSerializerEnabledQuery : public Query {
private:
bool enabled_;
};
class SetCollatorOptionsJsonQuery : public Query {
public:
SetCollatorOptionsJsonQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
: Query(console, std::move(tokenizer)) {
}
td::Status run() override;
td::Status send() override;
td::Status receive(td::BufferSlice data) override;
static std::string get_name() {
return "setcollatoroptionsjson";
}
static std::string get_help() {
return "setcollatoroptionsjson <filename>\tset collator options from file <filename>";
}
std::string name() const override {
return get_name();
}
private:
std::string file_name_;
};
class ResetCollatorOptionsQuery : public Query {
public:
ResetCollatorOptionsQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
: Query(console, std::move(tokenizer)) {
}
td::Status run() override;
td::Status send() override;
td::Status receive(td::BufferSlice data) override;
static std::string get_name() {
return "resetcollatoroptions";
}
static std::string get_help() {
return "resetcollatoroptions\tset collator options to default values";
}
std::string name() const override {
return get_name();
}
};
class GetCollatorOptionsJsonQuery : public Query {
public:
GetCollatorOptionsJsonQuery(td::actor::ActorId<ValidatorEngineConsole> console, Tokenizer tokenizer)
: Query(console, std::move(tokenizer)) {
}
td::Status run() override;
td::Status send() override;
td::Status receive(td::BufferSlice data) override;
static std::string get_name() {
return "getcollatoroptionsjson";
}
static std::string get_help() {
return "getcollatoroptionsjson <filename>\tsave current collator options to file <filename>";
}
std::string name() const override {
return get_name();
}
private:
std::string file_name_;
};
@@ -147,6 +147,9 @@ void ValidatorEngineConsole::run() {
add_query_runner(std::make_unique<QueryRunnerImpl<DelCustomOverlayQuery>>());
add_query_runner(std::make_unique<QueryRunnerImpl<ShowCustomOverlaysQuery>>());
add_query_runner(std::make_unique<QueryRunnerImpl<SetStateSerializerEnabledQuery>>());
add_query_runner(std::make_unique<QueryRunnerImpl<SetCollatorOptionsJsonQuery>>());
add_query_runner(std::make_unique<QueryRunnerImpl<ResetCollatorOptionsQuery>>());
add_query_runner(std::make_unique<QueryRunnerImpl<GetCollatorOptionsJsonQuery>>());
}
bool ValidatorEngineConsole::envelope_send_query(td::BufferSlice query, td::Promise<td::BufferSlice> promise) {
@@ -14,5 +14,9 @@ add_executable(validator-engine ${VALIDATOR_ENGINE_SOURCE})
target_link_libraries(validator-engine overlay tdutils tdactor adnl tl_api dht
rldp rldp2 catchain validatorsession full-node validator ton_validator validator
fift-lib memprof git ${JEMALLOC_LIBRARIES})
if (JEMALLOC_FOUND)
target_include_directories(validator-engine PRIVATE ${JEMALLOC_INCLUDE_DIR})
target_compile_definitions(validator-engine PRIVATE -DTON_USE_JEMALLOC=1)
endif()
install(TARGETS validator-engine RUNTIME DESTINATION bin)
@@ -75,6 +75,10 @@
#include "block/precompiled-smc/PrecompiledSmartContract.h"
#include "interfaces/validator-manager.h"
#if TON_USE_JEMALLOC
#include <jemalloc/jemalloc.h>
#endif
Config::Config() {
out_port = 3278;
full_node = ton::PublicKeyHash::zero();

@@ -1179,6 +1183,55 @@ class CheckDhtServerStatusQuery : public td::actor::Actor {
td::Promise<td::BufferSlice> promise_;
};
#if TON_USE_JEMALLOC
class JemallocStatsWriter : public td::actor::Actor {
public:
void start_up() override {
alarm();
}
void alarm() override {
alarm_timestamp() = td::Timestamp::in(60.0);
auto r_stats = get_stats();
if (r_stats.is_error()) {
LOG(WARNING) << "Jemalloc stats error : " << r_stats.move_as_error();
} else {
auto s = r_stats.move_as_ok();
LOG(WARNING) << "JEMALLOC_STATS : [ timestamp=" << (ton::UnixTime)td::Clocks::system()
<< " allocated=" << s.allocated << " active=" << s.active << " metadata=" << s.metadata
<< " resident=" << s.resident << " ]";
}
}
private:
struct JemallocStats {
size_t allocated, active, metadata, resident;
};
static td::Result<JemallocStats> get_stats() {
size_t sz = sizeof(size_t);
static size_t epoch = 1;
if (mallctl("epoch", &epoch, &sz, &epoch, sz)) {
return td::Status::Error("Failed to refrash stats");
}
JemallocStats stats;
if (mallctl("stats.allocated", &stats.allocated, &sz, nullptr, 0)) {
return td::Status::Error("Cannot get stats.allocated");
}
if (mallctl("stats.active", &stats.active, &sz, nullptr, 0)) {
return td::Status::Error("Cannot get stats.active");
}
if (mallctl("stats.metadata", &stats.metadata, &sz, nullptr, 0)) {
return td::Status::Error("Cannot get stats.metadata");
}
if (mallctl("stats.resident", &stats.resident, &sz, nullptr, 0)) {
return td::Status::Error("Cannot get stats.resident");
}
return stats;
}
};
#endif
void ValidatorEngine::set_local_config(std::string str) {
local_config_ = str;
}
@@ -1202,6 +1255,9 @@ void ValidatorEngine::schedule_shutdown(double at) {
}
void ValidatorEngine::start_up() {
alarm_timestamp() = td::Timestamp::in(1.0 + td::Random::fast(0, 100) * 0.01);
#if TON_USE_JEMALLOC
td::actor::create_actor<JemallocStatsWriter>("mem-stat").release();
#endif
}
void ValidatorEngine::alarm() {

@@ -1411,7 +1467,18 @@ td::Status ValidatorEngine::load_global_config() {
h.push_back(b);
}
validator_options_.write().set_hardforks(std::move(h));
validator_options_.write().set_state_serializer_enabled(config_.state_serializer_enabled);
auto r_total_ram = td::get_total_ram();
if (r_total_ram.is_error()) {
LOG(ERROR) << "Failed to get total RAM size: " << r_total_ram.move_as_error();
} else {
td::uint64 total_ram = r_total_ram.move_as_ok();
LOG(WARNING) << "Total RAM = " << td::format::as_size(total_ram);
if (total_ram >= (90ULL << 30)) {
fast_state_serializer_enabled_ = true;
}
}
validator_options_.write().set_fast_state_serializer_enabled(fast_state_serializer_enabled_);
return td::Status::OK();
}

@@ -1823,6 +1890,9 @@ void ValidatorEngine::started_overlays() {
void ValidatorEngine::start_validator() {
validator_options_.write().set_allow_blockchain_init(config_.validators.size() > 0);
validator_options_.write().set_state_serializer_enabled(config_.state_serializer_enabled);
load_collator_options();
validator_manager_ = ton::validator::ValidatorManagerFactory::create(
validator_options_, db_root_, keyring_.get(), adnl_.get(), rldp_.get(), overlay_manager_.get());
@@ -2413,6 +2483,69 @@ void ValidatorEngine::del_custom_overlay_from_config(std::string name, td::Promi
promise.set_error(td::Status::Error(PSTRING() << "no overlay \"" << name << "\" in config"));
}
static td::Result<td::Ref<ton::validator::CollatorOptions>> parse_collator_options(td::MutableSlice json_str) {
td::Ref<ton::validator::CollatorOptions> ref{true};
ton::validator::CollatorOptions& opts = ref.write();
// Set default values (from_json leaves missing fields as is)
ton::ton_api::engine_validator_collatorOptions f;
f.deferring_enabled_ = opts.deferring_enabled;
f.defer_out_queue_size_limit_ = opts.defer_out_queue_size_limit;
f.defer_messages_after_ = opts.defer_messages_after;
f.dispatch_phase_2_max_total_ = opts.dispatch_phase_2_max_total;
f.dispatch_phase_3_max_total_ = opts.dispatch_phase_3_max_total;
f.dispatch_phase_2_max_per_initiator_ = opts.dispatch_phase_2_max_per_initiator;
f.dispatch_phase_3_max_per_initiator_ =
opts.dispatch_phase_3_max_per_initiator ? opts.dispatch_phase_3_max_per_initiator.value() : -1;
TRY_RESULT_PREFIX(json, td::json_decode(json_str), "failed to parse json: ");
TRY_STATUS_PREFIX(ton::ton_api::from_json(f, json.get_object()), "json does not fit TL scheme: ");
if (f.defer_messages_after_ <= 0) {
return td::Status::Error("defer_messages_after should be positive");
}
if (f.defer_out_queue_size_limit_ < 0) {
return td::Status::Error("defer_out_queue_size_limit should be non-negative");
}
if (f.dispatch_phase_2_max_total_ < 0) {
return td::Status::Error("dispatch_phase_2_max_total should be non-negative");
}
if (f.dispatch_phase_3_max_total_ < 0) {
return td::Status::Error("dispatch_phase_3_max_total should be non-negative");
}
if (f.dispatch_phase_2_max_per_initiator_ < 0) {
return td::Status::Error("dispatch_phase_2_max_per_initiator should be non-negative");
}
opts.deferring_enabled = f.deferring_enabled_;
opts.defer_messages_after = f.defer_messages_after_;
opts.defer_out_queue_size_limit = f.defer_out_queue_size_limit_;
opts.dispatch_phase_2_max_total = f.dispatch_phase_2_max_total_;
opts.dispatch_phase_3_max_total = f.dispatch_phase_3_max_total_;
opts.dispatch_phase_2_max_per_initiator = f.dispatch_phase_2_max_per_initiator_;
if (f.dispatch_phase_3_max_per_initiator_ >= 0) {
opts.dispatch_phase_3_max_per_initiator = f.dispatch_phase_3_max_per_initiator_;
} else {
opts.dispatch_phase_3_max_per_initiator = {};
}
return ref;
}
void ValidatorEngine::load_collator_options() {
auto r_data = td::read_file(collator_options_file());
if (r_data.is_error()) {
return;
}
td::BufferSlice data = r_data.move_as_ok();
auto r_collator_options = parse_collator_options(data.as_slice());
if (r_collator_options.is_error()) {
LOG(ERROR) << "Failed to read collator options from file: " << r_collator_options.move_as_error();
return;
}
validator_options_.write().set_collator_options(r_collator_options.move_as_ok());
}
void ValidatorEngine::check_key(ton::PublicKeyHash id, td::Promise<td::Unit> promise) {
if (keys_.count(id) == 1) {
promise.set_value(td::Unit());
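For illustration (a sketch, not from the diff): `parse_collator_options` above expects a JSON object with the fields of `engine.validator.collatorOptions` from the TL scheme earlier in this diff; such a file is loaded from `<db>/collator-options.json` and can also be uploaded with the new console command `setcollatoroptionsjson <filename>`. All numbers below are made-up example values, and omitted fields keep their defaults.

```json
{
  "deferring_enabled": true,
  "defer_messages_after": 10,
  "defer_out_queue_size_limit": 100,
  "dispatch_phase_2_max_total": 150,
  "dispatch_phase_3_max_total": 150,
  "dispatch_phase_2_max_per_initiator": 20,
  "dispatch_phase_3_max_per_initiator": 10
}
```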
@@ -3492,7 +3625,7 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getShardO
if (!dest) {
td::actor::send_closure(
manager, &ton::validator::ValidatorManagerInterface::get_out_msg_queue_size, handle->id(),
[promise = std::move(promise)](td::Result<td::uint32> R) mutable {
[promise = std::move(promise)](td::Result<td::uint64> R) mutable {
if (R.is_error()) {
promise.set_value(create_control_query_error(R.move_as_error_prefix("failed to get queue size: ")));
} else {
@@ -3683,6 +3816,53 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setStateS
});
}
void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_setCollatorOptionsJson &query,
td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm,
td::Promise<td::BufferSlice> promise) {
if (!(perm & ValidatorEnginePermissions::vep_modify)) {
promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::error, "not authorized")));
return;
}
if (!started_) {
promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "not started")));
return;
}
auto r_collator_options = parse_collator_options(query.json_);
if (r_collator_options.is_error()) {
promise.set_value(create_control_query_error(r_collator_options.move_as_error_prefix("failed to parse json: ")));
return;
}
auto S = td::write_file(collator_options_file(), query.json_);
if (S.is_error()) {
promise.set_value(create_control_query_error(r_collator_options.move_as_error_prefix("failed to write file: ")));
return;
}
validator_options_.write().set_collator_options(r_collator_options.move_as_ok());
td::actor::send_closure(validator_manager_, &ton::validator::ValidatorManagerInterface::update_options,
validator_options_);
promise.set_value(ton::create_serialize_tl_object<ton::ton_api::engine_validator_success>());
}
void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getCollatorOptionsJson &query,
td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm,
td::Promise<td::BufferSlice> promise) {
if (!(perm & ValidatorEnginePermissions::vep_default)) {
promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::error, "not authorized")));
return;
}
if (!started_) {
promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "not started")));
return;
}
auto r_data = td::read_file(collator_options_file());
if (r_data.is_error()) {
promise.set_value(ton::create_serialize_tl_object<ton::ton_api::engine_validator_jsonConfig>("{}"));
} else {
promise.set_value(
ton::create_serialize_tl_object<ton::ton_api::engine_validator_jsonConfig>(r_data.ok().as_slice().str()));
}
}
void ValidatorEngine::process_control_query(td::uint16 port, ton::adnl::AdnlNodeIdShort src,
ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data,
td::Promise<td::BufferSlice> promise) {
@@ -3782,7 +3962,7 @@ void need_scheduler_status(int sig) {
need_scheduler_status_flag.store(true);
}
void dump_memory_stats() {
void dump_memprof_stats() {
if (!is_memprof_on()) {
return;
}

@@ -3807,8 +3987,20 @@ void dump_memory_stats() {
LOG(WARNING) << td::tag("fast_backtrace_success_rate", get_fast_backtrace_success_rate());
}
void dump_jemalloc_prof() {
#if TON_USE_JEMALLOC
const char *filename = "/tmp/validator-jemalloc.dump";
if (mallctl("prof.dump", nullptr, nullptr, &filename, sizeof(const char *)) == 0) {
LOG(ERROR) << "Written jemalloc dump to " << filename;
} else {
LOG(ERROR) << "Failed to write jemalloc dump to " << filename;
}
#endif
}
void dump_stats() {
dump_memory_stats();
dump_memprof_stats();
dump_jemalloc_prof();
LOG(WARNING) << td::NamedThreadSafeCounter::get_default();
}

@@ -4046,6 +4238,13 @@ int main(int argc, char *argv[]) {
acts.push_back([&x, v]() { td::actor::send_closure(x, &ValidatorEngine::set_catchain_max_block_delay, v); });
return td::Status::OK();
});
p.add_option(
'\0', "fast-state-serializer",
"faster persistent state serializer, but requires more RAM (enabled automatically on machines with >= 90GB RAM)",
[&]() {
acts.push_back(
[&x]() { td::actor::send_closure(x, &ValidatorEngine::set_fast_state_serializer_enabled, true); });
});
auto S = p.run(argc, argv);
if (S.is_error()) {
LOG(ERROR) << "failed to parse options: " << S.move_as_error();
@@ -220,6 +220,7 @@ class ValidatorEngine : public td::actor::Actor {
bool started_ = false;
ton::BlockSeqno truncate_seqno_{0};
std::string session_logs_file_;
bool fast_state_serializer_enabled_ = false;
std::set<ton::CatchainSeqno> unsafe_catchains_;
std::map<ton::BlockSeqno, std::pair<ton::CatchainSeqno, td::uint32>> unsafe_catchain_rotations_;

@@ -299,6 +300,9 @@ class ValidatorEngine : public td::actor::Actor {
void set_catchain_max_block_delay(double value) {
catchain_max_block_delay_ = value;
}
void set_fast_state_serializer_enabled(bool value) {
fast_state_serializer_enabled_ = value;
}
void start_up() override;
ValidatorEngine() {
}

@@ -384,12 +388,16 @@ class ValidatorEngine : public td::actor::Actor {
std::string custom_overlays_config_file() const {
return db_root_ + "/custom-overlays.json";
}
std::string collator_options_file() const {
return db_root_ + "/collator-options.json";
}
void load_custom_overlays_config();
td::Status write_custom_overlays_config();
void add_custom_overlay_to_config(
ton::tl_object_ptr<ton::ton_api::engine_validator_customOverlay> overlay, td::Promise<td::Unit> promise);
void del_custom_overlay_from_config(std::string name, td::Promise<td::Unit> promise);
void load_collator_options();
void check_key(ton::PublicKeyHash id, td::Promise<td::Unit> promise);

@@ -477,6 +485,10 @@ class ValidatorEngine : public td::actor::Actor {
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
void run_control_query(ton::ton_api::engine_validator_setStateSerializerEnabled &query, td::BufferSlice data,
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
void run_control_query(ton::ton_api::engine_validator_setCollatorOptionsJson &query, td::BufferSlice data,
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
void run_control_query(ton::ton_api::engine_validator_getCollatorOptionsJson &query, td::BufferSlice data,
ton::PublicKeyHash src, td::uint32 perm, td::Promise<td::BufferSlice> promise);
template <class T>
void run_control_query(T &query, td::BufferSlice data, ton::PublicKeyHash src, td::uint32 perm,
td::Promise<td::BufferSlice> promise) {
@@ -90,7 +90,7 @@ class ValidatorSessionImpl : public ValidatorSession {
td::actor::ActorOwn<catchain::CatChain> catchain_;
std::unique_ptr<ValidatorSessionDescription> description_;
double catchain_max_block_delay_ = 0.5;
double catchain_max_block_delay_ = 0.4;
void on_new_round(td::uint32 round);
void on_catchain_started();
Some files were not shown because too many files have changed in this diff.