diff --git a/.clang_complete b/.clang_complete index d7cda0ff..e88dbc7b 100644 --- a/.clang_complete +++ b/.clang_complete @@ -1,5 +1,5 @@ -xc++ --std=c++14 +-std=c++17 -iquote . -iquote tdtl/ -iquote tl/ diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 00000000..78c6ddee --- /dev/null +++ b/.editorconfig @@ -0,0 +1,8 @@ +root = true + +[*] +end_of_line = lf +insert_final_newline = true +charset = utf-8 +indent_style = space +indent_size = 2 diff --git a/.github/script/fift-func-wasm-build-ubuntu.sh b/.github/script/fift-func-wasm-build-ubuntu.sh deleted file mode 100755 index 95de71f9..00000000 --- a/.github/script/fift-func-wasm-build-ubuntu.sh +++ /dev/null @@ -1,99 +0,0 @@ -# The script builds funcfift compiler to WASM - -# dependencies: -#sudo apt-get install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libevent-dev - -export CC=$(which clang) -export CXX=$(which clang++) -export CCACHE_DISABLE=1 - -cd ../.. -rm -rf openssl zlib emsdk secp256k1 libsodium build -echo `pwd` - -git clone https://github.com/openssl/openssl.git -cd openssl -git checkout OpenSSL_1_1_1j -./config -make -j16 -OPENSSL_DIR=`pwd` -cd .. - -git clone https://github.com/madler/zlib.git -cd zlib -ZLIB_DIR=`pwd` -cd .. - -git clone https://github.com/bitcoin-core/secp256k1.git -cd secp256k1 -./autogen.sh -SECP256K1_DIR=`pwd` -cd .. - -git clone https://github.com/jedisct1/libsodium --branch stable -cd libsodium -SODIUM_DIR=`pwd` -cd .. - -mkdir build -cd build -cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DZLIB_LIBRARY=/usr/lib/x86_64-linux-gnu/libz.so -DZLIB_INCLUDE_DIR=$ZLIB_DIR -DOPENSSL_ROOT_DIR=$OPENSSL_DIR -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so -DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.so -DTON_USE_ABSEIL=OFF .. - -test $? -eq 0 || { echo "Can't configure TON build"; exit 1; } - -ninja fift smc-envelope - -test $? -eq 0 || { echo "Can't compile fift "; exit 1; } - -rm -rf * - -cd .. - -git clone https://github.com/emscripten-core/emsdk.git -cd emsdk -./emsdk install 3.1.19 -./emsdk activate 3.1.19 -EMSDK_DIR=`pwd` - -source $EMSDK_DIR/emsdk_env.sh -export CC=$(which emcc) -export CXX=$(which em++) -export CCACHE_DISABLE=1 - -cd ../openssl - -make clean -emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test no-ui -sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile -sed -i 's/-ldl//g' Makefile -sed -i 's/-O3/-Os/g' Makefile -emmake make depend -emmake make -j16 -test $? -eq 0 || { echo "Can't compile OpenSSL with emmake "; exit 1; } - -cd ../zlib - -emconfigure ./configure --static -emmake make -j16 -test $? -eq 0 || { echo "Can't compile zlib with emmake "; exit 1; } -ZLIB_DIR=`pwd` - -cd ../secp256k1 - -emconfigure ./configure --enable-module-recovery -emmake make -j16 -test $? -eq 0 || { echo "Can't compile secp256k1 with emmake "; exit 1; } - -cd ../libsodium - -emconfigure ./configure --disable-ssp -emmake make -j16 -test $? 
-eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; } - -cd ../build - -emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release -DZLIB_LIBRARY=$ZLIB_DIR/libz.a -DZLIB_INCLUDE_DIR=$ZLIB_DIR -DOPENSSL_ROOT_DIR=$OPENSSL_DIR -DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include -DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a -DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a -DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake -DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" -DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include -DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a -DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include -DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a -DSODIUM_LIBRARY_DEBUG=$SODIUM_DIR/src/libsodium/.libs/libsodium.a -DSODIUM_USE_STATIC_LIBS=ON .. -test $? -eq 0 || { echo "Can't configure TON with with emmake "; exit 1; } -cp -R ../crypto/smartcont ../crypto/fift/lib crypto - -emmake make -j16 funcfiftlib func fift tlbc emulator-emscripten diff --git a/.github/workflows/build-ton-linux-android-tonlib.yml b/.github/workflows/build-ton-linux-android-tonlib.yml new file mode 100644 index 00000000..ae1be22f --- /dev/null +++ b/.github/workflows/build-ton-linux-android-tonlib.yml @@ -0,0 +1,32 @@ +name: Tonlib Android + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + runs-on: ubuntu-22.04 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build automake libtool texinfo autoconf libgflags-dev \ + zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev \ + libtool autoconf libsodium-dev libsecp256k1-dev + + - name: Build TON + run: | + cp assembly/android/build-android-tonlib.sh . + chmod +x build-android-tonlib.sh + ./build-android-tonlib.sh -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: tonlib-android + path: artifacts \ No newline at end of file diff --git a/.github/workflows/build-ton-linux-x86-64-shared.yml b/.github/workflows/build-ton-linux-x86-64-shared.yml new file mode 100644 index 00000000..166de660 --- /dev/null +++ b/.github/workflows/build-ton-linux-x86-64-shared.yml @@ -0,0 +1,40 @@ +name: Ubuntu TON build (shared, x86-64) + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + strategy: + fail-fast: false + matrix: + os: [ubuntu-20.04, ubuntu-22.04] + runs-on: ${{ matrix.os }} + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + - name: Install clang-16 + run: | + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all + + - name: Build TON + run: | + cp assembly/native/build-ubuntu-shared.sh . 
+ chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh -t -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-binaries-${{ matrix.os }} + path: artifacts diff --git a/.github/workflows/build-ton-macos-x86-64-shared.yml b/.github/workflows/build-ton-macos-x86-64-shared.yml new file mode 100644 index 00000000..c9331e3b --- /dev/null +++ b/.github/workflows/build-ton-macos-x86-64-shared.yml @@ -0,0 +1,25 @@ +name: MacOS TON build (shared, x86-64) + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + runs-on: macos-12 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Build TON + run: | + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-binaries-macos-12 + path: artifacts diff --git a/.github/workflows/build-ton-wasm-emscripten.yml b/.github/workflows/build-ton-wasm-emscripten.yml new file mode 100644 index 00000000..16156b07 --- /dev/null +++ b/.github/workflows/build-ton-wasm-emscripten.yml @@ -0,0 +1,30 @@ +name: Emscripten TON build (wasm) + +on: [push,workflow_dispatch,workflow_call] + +jobs: + build: + runs-on: ubuntu-22.04 + + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Install system libraries + run: | + sudo apt-get update + sudo apt-get install -y build-essential git openssl cmake ninja-build zlib1g-dev libssl-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + - name: Build TON WASM artifacts + run: | + cd assembly/wasm + chmod +x fift-func-wasm-build-ubuntu.sh + ./fift-func-wasm-build-ubuntu.sh -a + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-wasm-binaries + path: artifacts \ No newline at end of file diff --git a/.github/workflows/create-release.yml b/.github/workflows/create-release.yml index 10c20ac0..3d248cfe 100644 --- a/.github/workflows/create-release.yml +++ b/.github/workflows/create-release.yml @@ -46,7 +46,7 @@ jobs: - name: Download Windows artifacts uses: dawidd6/action-download-artifact@v2 with: - workflow: win-2019-compile.yml + workflow: ton-x86-64-windows.yml path: artifacts workflow_conclusion: success skip_unpack: true @@ -54,7 +54,7 @@ jobs: - name: Download and unzip Windows artifacts uses: dawidd6/action-download-artifact@v2 with: - workflow: win-2019-compile.yml + workflow: ton-x86-64-windows.yml path: artifacts workflow_conclusion: success skip_unpack: false @@ -62,7 +62,7 @@ jobs: - name: Download WASM artifacts uses: dawidd6/action-download-artifact@v2 with: - workflow: ton-wasm-emscripten.yml + workflow: build-ton-wasm-emscripten.yml path: artifacts workflow_conclusion: success skip_unpack: true diff --git a/.github/workflows/docker-ubuntu-image.yml b/.github/workflows/docker-ubuntu-image.yml index ca754078..449711d8 100644 --- a/.github/workflows/docker-ubuntu-image.yml +++ b/.github/workflows/docker-ubuntu-image.yml @@ -1,4 +1,4 @@ -name: Docker Ubuntu 20.04 image +name: Docker Ubuntu 22.04 image on: workflow_dispatch: @@ -12,10 +12,12 @@ env: jobs: build-and-push: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - - name: Checkout + - name: Check out repository uses: actions/checkout@v3 + with: + submodules: 'recursive' - name: Set up QEMU uses: docker/setup-qemu-action@v1 @@ -35,5 +37,5 @@ jobs: uses: docker/build-push-action@v2 with: push: true - context: ./docker + 
context: ./ tags: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:latest diff --git a/.github/workflows/macos-11.7-compile.yml b/.github/workflows/macos-11.7-compile.yml index d96cdb7b..e69de29b 100644 --- a/.github/workflows/macos-11.7-compile.yml +++ b/.github/workflows/macos-11.7-compile.yml @@ -1,72 +0,0 @@ -name: MacOS 11.7 Big Sur x86-64 Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: macos-11 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - name: Compile OpenSSL - run: | - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=11.7 - make build_libs -j4 - - - name: Build all - run: | - export NONINTERACTIVE=1 - brew install ninja secp256k1 libsodium libmicrohttpd pkg-config - rootPath=`pwd` - mkdir build - cd build - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. - ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator proxy-liteserver - - - name: Find & copy binaries - run: | - mkdir artifacts - cp build/storage/storage-daemon/storage-daemon artifacts/ - cp build/storage/storage-daemon/storage-daemon-cli artifacts/ - cp build/blockchain-explorer/blockchain-explorer artifacts/ - cp build/crypto/fift artifacts/ - cp build/crypto/func artifacts/ - cp build/crypto/create-state artifacts/ - cp build/crypto/tlbc artifacts/ - cp build/validator-engine-console/validator-engine-console artifacts/ - cp build/tonlib/tonlib-cli artifacts/ - cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib - cp build/http/http-proxy artifacts/ - cp build/rldp-http-proxy/rldp-http-proxy artifacts/ - cp build/dht-server/dht-server artifacts/ - cp build/lite-client/lite-client artifacts/ - cp build/validator-engine/validator-engine artifacts/ - cp build/utils/generate-random-id artifacts/ - cp build/utils/json2tlo artifacts/ - cp build/adnl/adnl-proxy artifacts/ - cp build/emulator/*emulator.* artifacts/ - cp build/utils/proxy-liteserver artifacts/ - chmod +x artifacts/* - rsync -r crypto/smartcont artifacts/ - rsync -r crypto/fift/lib artifacts/ - ls -laRt artifacts - - - name: Simple binaries test - run: | - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-macos-11.7 - path: artifacts diff --git a/.github/workflows/macos-12.6-compile.yml b/.github/workflows/macos-12.6-compile.yml deleted file mode 100644 index 75f35559..00000000 --- a/.github/workflows/macos-12.6-compile.yml +++ /dev/null @@ -1,83 +0,0 @@ -name: MacOS 12.6 Monterey x86-64 Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: macos-12 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - name: Compile OpenSSL - run: | - export NONINTERACTIVE=1 - brew install ninja libsodium automake - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd 
openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./Configure --prefix=/usr/local/macos darwin64-x86_64-cc -static -mmacosx-version-min=12.6 - make build_libs -j4 - - - name: Compile Secp256k1 - run: | - git clone https://github.com/libbitcoin/secp256k1.git - cd secp256k1 - ./autogen.sh - ./configure --enable-module-recovery - make - make install - - - name: Build all - run: | - export NONINTERACTIVE=1 - brew install ninja libmicrohttpd pkg-config - rootPath=`pwd` - mkdir build - cd build - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=12.6 -DCMAKE_CXX_FLAGS="-stdlib=libc++" -DCMAKE_BUILD_TYPE=Release .. - ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator proxy-liteserver - - - name: Find & copy binaries - run: | - mkdir artifacts - cp build/storage/storage-daemon/storage-daemon artifacts/ - cp build/storage/storage-daemon/storage-daemon-cli artifacts/ - cp build/blockchain-explorer/blockchain-explorer artifacts/ - cp build/crypto/fift artifacts/ - cp build/crypto/func artifacts/ - cp build/crypto/create-state artifacts/ - cp build/crypto/tlbc artifacts/ - cp build/validator-engine-console/validator-engine-console artifacts/ - cp build/tonlib/tonlib-cli artifacts/ - cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib - cp build/http/http-proxy artifacts/ - cp build/rldp-http-proxy/rldp-http-proxy artifacts/ - cp build/dht-server/dht-server artifacts/ - cp build/lite-client/lite-client artifacts/ - cp build/validator-engine/validator-engine artifacts/ - cp build/utils/generate-random-id artifacts/ - cp build/utils/json2tlo artifacts/ - cp build/adnl/adnl-proxy artifacts/ - cp build/emulator/*emulator.* artifacts/ - cp build/utils/proxy-liteserver artifacts/ - chmod +x artifacts/* - rsync -r crypto/smartcont artifacts/ - rsync -r crypto/fift/lib artifacts/ - ls -laRt artifacts - - - name: Simple binaries test - run: | - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-macos-12.6 - path: artifacts diff --git a/.github/workflows/ton-aarch64-linux.yml b/.github/workflows/ton-aarch64-linux.yml deleted file mode 100644 index 3c600bee..00000000 --- a/.github/workflows/ton-aarch64-linux.yml +++ /dev/null @@ -1,50 +0,0 @@ -name: "TON aarch64 Linux binaries" - -on: [workflow_dispatch,workflow_call] - -jobs: - build: - runs-on: ubuntu-22.04 - - steps: - - run: | - sudo apt update - sudo apt install -y apt-utils - sudo apt install -q -y qemu-system-aarch64 qemu-efi binfmt-support qemu-user-static - - - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - uses: cachix/install-nix-action@v18 - with: - extra_nix_config: | - access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - - name: Compile - run: nix build .?submodules=1#packages.aarch64-linux.ton-oldglibc_staticbinaries --print-build-logs --system aarch64-linux -o result-aarch64 - - - name: Copy binaries - run: | - ls -lart - mkdir artifacts - cp $PWD/result-aarch64-linux/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-aarch64-linux/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so - cp 
$PWD/result-aarch64-linux/lib/libemulator.so artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ - - - name: Simple binaries test - run: | - sudo mv /nix/store /nix/store2 - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-aarch64-linux-binaries - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ton-aarch64-macos.yml b/.github/workflows/ton-aarch64-macos.yml deleted file mode 100644 index 75fcec78..00000000 --- a/.github/workflows/ton-aarch64-macos.yml +++ /dev/null @@ -1,47 +0,0 @@ -name: "TON aarch64 macOS binaries" - -on: [workflow_dispatch,workflow_call] - -jobs: - build: - runs-on: macos-12 - - steps: - - run: brew install qemu - - - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - uses: cachix/install-nix-action@v18 - with: - extra_nix_config: | - access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - - name: Compile - run: nix build .?submodules=1#packages.aarch64-darwin.ton-staticbin-dylib --print-build-logs -o result-aarch64-darwin - - - name: Copy binaries - run: | - ls -lart - mkdir artifacts - cp $PWD/result-aarch64-darwin/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-aarch64-darwin/lib/libtonlibjson* artifacts/ - cp $PWD/result-aarch64-darwin/lib/libemulator* artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ - - - name: Simple binaries test - run: | - sudo mv /nix/store /nix/store2 - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-aarch64-macos-binaries - path: artifacts diff --git a/.github/workflows/ton-ccpcheck.yml b/.github/workflows/ton-ccpcheck.yml index 8e9d6ad2..d2d8cf70 100644 --- a/.github/workflows/ton-ccpcheck.yml +++ b/.github/workflows/ton-ccpcheck.yml @@ -1,10 +1,9 @@ -name: TON Ccpcheck +name: TON Static Code Analysis on: [push,workflow_dispatch,workflow_call] jobs: build: - runs-on: ubuntu-22.04 steps: diff --git a/.github/workflows/ton-wasm-emscripten.yml b/.github/workflows/ton-wasm-emscripten.yml deleted file mode 100644 index 40a65818..00000000 --- a/.github/workflows/ton-wasm-emscripten.yml +++ /dev/null @@ -1,40 +0,0 @@ -name: TON WASM Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - runs-on: ubuntu-22.04 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip nodejs libsecp256k1-dev libsodium-dev - - - name: Configure & Build - run: | - cd .github/script - ./fift-func-wasm-build-ubuntu.sh - - - name: Find & copy binaries - run: | - mkdir artifacts - ls build/crypto - cp build/crypto/fift* artifacts - cp build/crypto/func* artifacts - cp build/crypto/tlbc* artifacts - cp build/emulator/emulator-emscripten* artifacts - cp -R crypto/smartcont artifacts - cp -R crypto/fift/lib artifacts - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-wasm-binaries - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ton-x86-64-linux.yml b/.github/workflows/ton-x86-64-linux.yml index e939f22f..abbe1cca 
100644 --- a/.github/workflows/ton-x86-64-linux.yml +++ b/.github/workflows/ton-x86-64-linux.yml @@ -1,4 +1,4 @@ -name: "TON x86_64 Linux binaries" +name: Ubuntu TON build (portable, x86-64) on: [push,workflow_dispatch,workflow_call] @@ -15,24 +15,16 @@ jobs: with: submodules: 'recursive' - - uses: cachix/install-nix-action@v18 + - uses: cachix/install-nix-action@v23 with: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - name: Compile - run: nix build .?submodules=1#packages.x86_64-linux.ton-oldglibc_staticbinaries --print-build-logs --system x86_64-linux -o result-x86_64 - - - name: Copy binaries + - name: Build TON run: | - ls -lart - mkdir artifacts - cp $PWD/result-x86_64/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-x86_64/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so - cp $PWD/result-x86_64/lib/libemulator.so artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ + cp assembly/nix/build-linux-x86-64-nix.sh . + chmod +x build-linux-x86-64-nix.sh + ./build-linux-x86-64-nix.sh -t - name: Simple binaries test run: | @@ -46,4 +38,4 @@ jobs: uses: actions/upload-artifact@master with: name: ton-x86_64-linux-binaries - path: artifacts \ No newline at end of file + path: artifacts diff --git a/.github/workflows/ton-x86-64-macos.yml b/.github/workflows/ton-x86-64-macos.yml index b9e9cf9b..8c71f34a 100644 --- a/.github/workflows/ton-x86-64-macos.yml +++ b/.github/workflows/ton-x86-64-macos.yml @@ -1,4 +1,4 @@ -name: "TON x86_64 macOS binaries" +name: MacOS TON build (portable, x86-64) on: [push,workflow_dispatch,workflow_call] @@ -11,24 +11,16 @@ jobs: with: submodules: 'recursive' - - uses: cachix/install-nix-action@v22 + - uses: cachix/install-nix-action@v23 with: extra_nix_config: | access-tokens = github.com=${{ secrets.GITHUB_TOKEN }} - - name: Compile - run: nix build .?submodules=1#packages.x86_64-darwin.ton-staticbin-dylib --print-build-logs -o result-x86_64-darwin - - - name: Copy binaries + - name: Build TON run: | - ls -lart - mkdir artifacts - cp $PWD/result-x86_64-darwin/bin/* artifacts/ - chmod +x artifacts/* - cp $PWD/result-x86_64-darwin/lib/libtonlibjson.dylib artifacts/ - cp $PWD/result-x86_64-darwin/lib/libemulator.dylib artifacts/ - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ + cp assembly/nix/build-macos-nix.sh . + chmod +x build-macos-nix.sh + ./build-macos-nix.sh -t - name: Simple binaries test run: | @@ -42,4 +34,4 @@ jobs: uses: actions/upload-artifact@master with: name: ton-x86_64-macos-binaries - path: artifacts \ No newline at end of file + path: artifacts diff --git a/.github/workflows/ton-x86-64-windows.yml b/.github/workflows/ton-x86-64-windows.yml new file mode 100644 index 00000000..67026183 --- /dev/null +++ b/.github/workflows/ton-x86-64-windows.yml @@ -0,0 +1,34 @@ +name: Windows TON build (portable, x86-64) + +on: [push,workflow_dispatch,workflow_call] + +defaults: + run: + shell: cmd + +jobs: + build: + + runs-on: windows-2022 + + steps: + - name: Get Current OS version + run: | + systeminfo | findstr /B /C:"OS Name" /C:"OS Version" + + - name: Check out current repository + uses: actions/checkout@v3 + with: + submodules: 'recursive' + + - name: Build TON + run: | + copy assembly\native\build-windows-github.bat . + copy assembly\native\build-windows.bat . 
+ build-windows-github.bat Enterprise + + - name: Upload artifacts + uses: actions/upload-artifact@master + with: + name: ton-win-binaries + path: artifacts diff --git a/.github/workflows/tonlib-android-jni.yml b/.github/workflows/tonlib-android-jni.yml deleted file mode 100644 index cdf410b4..00000000 --- a/.github/workflows/tonlib-android-jni.yml +++ /dev/null @@ -1,61 +0,0 @@ -name: Tonlib Android JNI - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: ubuntu-22.04 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build - - - name: Configure & Build - run: | - wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip - unzip android-ndk-r25b-linux.zip - export JAVA_AWT_LIBRARY=NotNeeded - export JAVA_JVM_LIBRARY=NotNeeded - export JAVA_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include - export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux - - export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b - export NDK_PLATFORM="android-21" - export ANDROID_PLATFORM="android-21" - export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto - - rm -rf example/android/src/drinkless/org/ton/TonApi.java - cd example/android/ - - sudo apt install -y libtool autoconf libsodium-dev libsecp256k1-dev - - cmake -GNinja -DTON_ONLY_TONLIB=ON . - - ninja prepare_cross_compiling - - sudo apt remove -y libsodium-dev libsecp256k1-dev - - rm CMakeCache.txt - ./build-all.sh - find . -name "*.debug" -type f -delete - - - name: Find & copy binaries - run: | - mkdir -p artifacts/tonlib-android-jni - cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-android-jni/ - cp -R example/android/libs/* artifacts/tonlib-android-jni/ - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: tonlib-android-jni - path: artifacts \ No newline at end of file diff --git a/.github/workflows/ubuntu-22.04-compile.yml b/.github/workflows/ubuntu-22.04-compile.yml deleted file mode 100644 index fc13701e..00000000 --- a/.github/workflows/ubuntu-22.04-compile.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: Ubuntu 22.04 Compile - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - - runs-on: ubuntu-22.04 - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build libsecp256k1-dev libsodium-dev - - - name: Show CPU flags - run: | - cat /proc/cpuinfo - - - name: Configure & Build - run: | - export CC=$(which clang) - export CXX=$(which clang++) - export CCACHE_DISABLE=1 - - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./config - make build_libs -j4 - - cd .. - rootPath=`pwd` - mkdir build - cd build - - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$rootPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$rootPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DCMAKE_CXX_FLAGS="-mavx2" .. 
- ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state emulator proxy-liteserver - - - name: Find & copy binaries - run: | - mkdir artifacts - cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli build/tonlib/libtonlibjson.so.0.5 build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.* build/utils/proxy-liteserver artifacts - chmod +x artifacts/* - cp -R crypto/smartcont artifacts/ - cp -R crypto/fift/lib artifacts/ - - - name: Simple binaries test - run: | - artifacts/validator-engine -V - artifacts/lite-client -V - artifacts/fift -V - artifacts/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-ubuntu-binaries - path: artifacts diff --git a/.github/workflows/ubuntu-compile.yml b/.github/workflows/ubuntu-compile.yml deleted file mode 100644 index 6907606d..00000000 --- a/.github/workflows/ubuntu-compile.yml +++ /dev/null @@ -1,68 +0,0 @@ -name: Ubuntu Compile x86-64 - -on: [push,workflow_dispatch,workflow_call] - -jobs: - build: - strategy: - fail-fast: false - matrix: - os: [ubuntu-20.04, ubuntu-22.04] - runs-on: ${{ matrix.os }} - - steps: - - name: Check out repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Install libraries - run: | - sudo apt update - sudo apt install -y build-essential git make cmake clang libgflags-dev zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev ninja-build libsecp256k1-dev libsodium-dev - - - name: Show CPU flags - run: | - cat /proc/cpuinfo - - - name: Configure & Build - run: | - export CC=$(which clang) - export CXX=$(which clang++) - export CCACHE_DISABLE=1 - - mkdir build-${{ matrix.os }} - cd build-${{ matrix.os }} - - git clone https://github.com/openssl/openssl openssl_1_1_1 - cd openssl_1_1_1 - git checkout OpenSSL_1_1_1-stable - ./config - make build_libs -j4 - - cd .. - buildPath=`pwd` - - cmake -GNinja -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=$buildPath/openssl_1_1_1/include -DOPENSSL_CRYPTO_LIBRARY=$buildPath/openssl_1_1_1/libcrypto.a -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DCMAKE_CXX_FLAGS="-mavx2" .. 
- ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator proxy-liteserver - - - name: Find & copy binaries - run: | - mkdir artifacts-${{ matrix.os }} - cp build-${{ matrix.os }}/storage/storage-daemon/storage-daemon build-${{ matrix.os }}/storage/storage-daemon/storage-daemon-cli build-${{ matrix.os }}/crypto/fift build-${{ matrix.os }}/crypto/tlbc build-${{ matrix.os }}/crypto/func build-${{ matrix.os }}/crypto/create-state build-${{ matrix.os }}/validator-engine-console/validator-engine-console build-${{ matrix.os }}/tonlib/tonlib-cli build-${{ matrix.os }}/tonlib/libtonlibjson.so.0.5 build-${{ matrix.os }}/http/http-proxy build-${{ matrix.os }}/rldp-http-proxy/rldp-http-proxy build-${{ matrix.os }}/dht-server/dht-server build-${{ matrix.os }}/lite-client/lite-client build-${{ matrix.os }}/validator-engine/validator-engine build-${{ matrix.os }}/utils/generate-random-id build-${{ matrix.os }}/utils/json2tlo build-${{ matrix.os }}/adnl/adnl-proxy build-${{ matrix.os }}/emulator/libemulator.* build-${{ matrix.os }}/utils/proxy-liteserver artifacts-${{ matrix.os }} - chmod +x artifacts-${{ matrix.os }}/* - cp -R crypto/smartcont artifacts-${{ matrix.os }} - cp -R crypto/fift/lib artifacts-${{ matrix.os }} - - - name: Simple binaries test - run: | - artifacts-${{ matrix.os }}/validator-engine -V - artifacts-${{ matrix.os }}/lite-client -V - artifacts-${{ matrix.os }}/fift -V - artifacts-${{ matrix.os }}/func -V - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-binaries-${{ matrix.os }} - path: artifacts-${{ matrix.os }} diff --git a/.github/workflows/win-2019-compile.yml b/.github/workflows/win-2019-compile.yml deleted file mode 100644 index 62a560d2..00000000 --- a/.github/workflows/win-2019-compile.yml +++ /dev/null @@ -1,103 +0,0 @@ -name: Windows Server 2019 x64 Compile - -on: [push,workflow_dispatch,workflow_call] - -defaults: - run: - shell: cmd - -jobs: - build: - - runs-on: windows-2019 - - steps: - - name: Get Current OS version - run: | - systeminfo | findstr /B /C:"OS Name" /C:"OS Version" - - - name: Check out current repository - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - name: Check out zlib repository - uses: actions/checkout@v3 - with: - repository: desktop-app/zlib - path: zlib - - - name: Setup msbuild.exe - uses: microsoft/setup-msbuild@v1.1 - - - name: Install Pkg-config Lite - run: choco install pkgconfiglite - - - name: Compile zlib Win64 - run: | - cd zlib\contrib\vstudio\vc14 - msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v142 - - - name: Compile secp256k1 Win64 - run: | - git clone https://github.com/libbitcoin/secp256k1.git - cd secp256k1\builds\msvc\vs2017 - msbuild /p:Configuration=StaticRelease -p:PlatformToolset=v142 -p:Platform=x64 - - - name: Install pre-compiled libsodium Win64 - run: | - curl -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip - unzip libsodium-1.0.18-stable-msvc.zip - - - name: Install pre-compiled OpenSSL Win64 - run: | - curl -Lo openssl-1.1.1j.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-1.1.1j.zip - jar xf openssl-1.1.1j.zip - - - name: Install pre-compiled libmicrohttpd Win64 - run: | - curl -Lo libmicrohttpd-0.9.77-w32-bin.zip 
https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip - unzip libmicrohttpd-0.9.77-w32-bin.zip - - - name: Install pre-compiled Readline Win64 - run: | - curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip - unzip readline-5.0-1-lib.zip - - - name: Compile - run: | - set root=%cd% - set SODIUM_DIR=%root%\libsodium - echo %root% - echo %SODIUM_DIR% - mkdir build - cd build - cmake -DCMAKE_BUILD_TYPE=Release -DSODIUM_USE_STATIC_LIBS=1 -DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include -DSECP256K1_LIBRARY=%root%\secp256k1\bin\x64\Release\v142\static\secp256k1.lib -DREADLINE_INCLUDE_DIR=%root%\readline-5.0-1-lib\include\readline -DREADLINE_LIBRARY=%root%\readline-5.0-1-lib\lib\readline.lib -DPORTABLE=1 -DZLIB_FOUND=1 -DMHD_FOUND=1 -DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib -DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static -DZLIB_INCLUDE_DIR=%root%\zlib -DZLIB_LIBRARY=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib -DOPENSSL_FOUND=1 -DOPENSSL_INCLUDE_DIR=%root%/openssl-1.1.1j/include -DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-1.1.1j/lib/libcrypto_static.lib -DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj /W0" .. - cmake --build . --target storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator proxy-liteserver --config Release - - - name: Show executables - run: | - cd build - del Release\test-* - dir *.exe /a-D /S /B - dir *.dll /a-D /S /B - - - name: Check if validator-engine.exe exists - run: | - copy %cd%\build\validator-engine\Release\validator-engine.exe test - - - name: Find & copy binaries - run: | - mkdir artifacts - mkdir artifacts\smartcont - mkdir artifacts\lib - - for %%I in (build\storage\storage-daemon\Release\storage-daemon.exe build\storage\storage-daemon\Release\storage-daemon-cli.exe build\blockchain-explorer\blockchain-explorer.exe build\crypto\Release\fift.exe build\crypto\Release\tlbc.exe build\crypto\Release\func.exe build\crypto\Release\create-state.exe build\validator-engine-console\Release\validator-engine-console.exe build\tonlib\Release\tonlib-cli.exe build\tonlib\Release\tonlibjson.dll build\http\Release\http-proxy.exe build\rldp-http-proxy\Release\rldp-http-proxy.exe build\dht-server\Release\dht-server.exe build\lite-client\Release\lite-client.exe build\validator-engine\Release\validator-engine.exe build\utils\Release\generate-random-id.exe build\utils\Release\json2tlo.exe build\adnl\Release\adnl-proxy.exe build\emulator\Release\emulator.dll build\utils\proxy-liteserver.exe) do copy %%I artifacts\ - xcopy /e /k /h /i crypto\smartcont artifacts\smartcont - xcopy /e /k /h /i crypto\fift\lib artifacts\lib - - - name: Upload artifacts - uses: actions/upload-artifact@master - with: - name: ton-win-binaries - path: artifacts diff --git a/.gitignore b/.gitignore index 54d9ffc7..536918ab 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,14 @@ test/regression-tests.cache/ *.swp **/*build*/ .idea -.vscode \ No newline at end of file +.vscode +zlib/ +libsodium/ +libmicrohttpd-0.9.77-w32-bin/ +readline-5.0-1-lib/ +secp256k1/ +openssl-3.1.4/ +libsodium-1.0.18-stable-msvc.zip +libmicrohttpd-0.9.77-w32-bin.zip +openssl-3.1.4.zip 
+readline-5.0-1-lib.zip diff --git a/CMake/FindMHD.cmake b/CMake/FindMHD.cmake index c4b94c0e..7d6dd5fd 100644 --- a/CMake/FindMHD.cmake +++ b/CMake/FindMHD.cmake @@ -2,23 +2,26 @@ # Once done this will define # # MHD_FOUND - system has MHD -# MHD_INCLUDE_DIRS - the MHD include directory +# MHD_INCLUDE_DIR - the MHD include directory # MHD_LIBRARY - Link these to use MHD -find_path( - MHD_INCLUDE_DIR - NAMES microhttpd.h - DOC "microhttpd include dir" -) +if (NOT MHD_LIBRARY) + find_path( + MHD_INCLUDE_DIR + NAMES microhttpd.h + DOC "microhttpd include dir" + ) -find_library( - MHD_LIBRARY - NAMES microhttpd microhttpd-10 libmicrohttpd libmicrohttpd-dll - DOC "microhttpd library" -) + find_library( + MHD_LIBRARY + NAMES microhttpd microhttpd-10 libmicrohttpd libmicrohttpd-dll + DOC "microhttpd library" + ) +endif() -set(MHD_INCLUDE_DIRS ${MHD_INCLUDE_DIR}) -set(MHD_LIBRARIES ${MHD_LIBRARY}) +if (MHD_LIBRARY) + message(STATUS "Found MHD: ${MHD_LIBRARY}") +endif() include(FindPackageHandleStandardArgs) find_package_handle_standard_args(MHD DEFAULT_MSG MHD_INCLUDE_DIR MHD_LIBRARY) diff --git a/CMake/FindSecp256k1.cmake b/CMake/FindSecp256k1.cmake index 11603f15..68a37c71 100644 --- a/CMake/FindSecp256k1.cmake +++ b/CMake/FindSecp256k1.cmake @@ -2,28 +2,27 @@ # Once done this will define # # SECP256K1_FOUND - system has SECP256K1 -# SECP256K1_INCLUDE_DIRS - the SECP256K1 include directory +# SECP256K1_INCLUDE_DIR - the SECP256K1 include directory # SECP256K1_LIBRARY - Link these to use SECP256K1 -find_path( - SECP256K1_INCLUDE_DIR - NAMES secp256k1_recovery.h - DOC "secp256k1_recovery.h include dir" -) +if (NOT SECP256K1_LIBRARY) + find_path( + SECP256K1_INCLUDE_DIR + NAMES secp256k1_recovery.h + DOC "secp256k1_recovery.h include dir" + ) -find_library( - SECP256K1_LIBRARY - NAMES secp256k1 libsecp256k1 - DOC "secp256k1 library" -) + find_library( + SECP256K1_LIBRARY + NAMES secp256k1 libsecp256k1 + DOC "secp256k1 library" + ) +endif() if (SECP256K1_LIBRARY) message(STATUS "Found Secp256k1: ${SECP256K1_LIBRARY}") endif() -set(SECP256K1_INCLUDE_DIRS ${SECP256K1_INCLUDE_DIR}) -set(SECP256K1_LIBRARIES ${SECP256K1_LIBRARY}) - include(FindPackageHandleStandardArgs) find_package_handle_standard_args(Secp256k1 DEFAULT_MSG SECP256K1_INCLUDE_DIR SECP256K1_LIBRARY) mark_as_advanced(SECP256K1_INCLUDE_DIR SECP256K1_LIBRARY) diff --git a/CMake/FindSodium.cmake b/CMake/FindSodium.cmake index 0053ac54..85194ee2 100644 --- a/CMake/FindSodium.cmake +++ b/CMake/FindSodium.cmake @@ -26,6 +26,7 @@ # Furthermore an imported "sodium" target is created. 
# + if (CMAKE_C_COMPILER_ID STREQUAL "GNU" OR CMAKE_C_COMPILER_ID STREQUAL "Clang") set(_GCC_COMPATIBLE 1) diff --git a/CMakeLists.txt b/CMakeLists.txt index 4b6f0dc9..89be3238 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.1 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) project(TON VERSION 0.5 LANGUAGES C CXX) set(CMAKE_POSITION_INDEPENDENT_CODE ON) @@ -79,7 +79,7 @@ else() set(HAVE_SSE42 FALSE) endif() -set(CMAKE_CXX_STANDARD 14) +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED TRUE) set(CMAKE_CXX_EXTENSIONS FALSE) @@ -211,7 +211,13 @@ set(CMAKE_THREAD_PREFER_PTHREAD ON) set(THREADS_PREFER_PTHREAD_FLAG ON) find_package(Threads REQUIRED) find_package(PkgConfig REQUIRED) -find_package(ZLIB REQUIRED) + +if (NOT ZLIB_FOUND) + find_package(ZLIB REQUIRED) +else() + message(STATUS "Using zlib ${ZLIB_LIBRARIES}") +endif() + if (TON_ARCH AND NOT MSVC) CHECK_CXX_COMPILER_FLAG( "-march=${TON_ARCH}" COMPILER_OPT_ARCH_SUPPORTED ) @@ -255,6 +261,9 @@ if (MSVC) add_definitions(-D_SCL_SECURE_NO_WARNINGS -D_CRT_SECURE_NO_WARNINGS) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP /W4 /wd4100 /wd4127 /wd4324 /wd4456 /wd4457 /wd4458 /wd4505 /wd4702") elseif (CLANG OR GCC) + if (GCC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstrong-eval-order=some") + endif() set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-omit-frame-pointer") if (APPLE) #use "-Wl,-exported_symbols_list,${CMAKE_CURRENT_SOURCE_DIR}/export_list" for exported symbols @@ -368,6 +377,9 @@ if (LATEX_FOUND) add_latex_document(doc/fiftbase.tex TARGET_NAME fift_basic_description) add_latex_document(doc/catchain.tex TARGET_NAME catchain_consensus_description) endif() +if (NOT LATEX_FOUND) + message(STATUS "Could NOT find LATEX (this is NOT an error)") +endif() #END internal function(target_link_libraries_system target) @@ -447,6 +459,10 @@ target_link_libraries(test-smartcont PRIVATE smc-envelope fift-lib ton_db) add_executable(test-bigint ${BIGINT_TEST_SOURCE}) target_link_libraries(test-bigint PRIVATE ton_crypto) +if (WINGETOPT_FOUND) + target_link_libraries_system(test-bigint wingetopt) +endif() + add_executable(test-cells test/test-td-main.cpp ${CELLS_TEST_SOURCE}) target_link_libraries(test-cells PRIVATE ton_crypto) @@ -513,26 +529,12 @@ target_link_libraries(test-rldp2 adnl adnltest dht rldp2 tl_api) add_executable(test-validator-session-state test/test-validator-session-state.cpp) target_link_libraries(test-validator-session-state adnl dht rldp validatorsession tl_api) -#add_executable(test-node test/test-node.cpp) -#target_link_libraries(test-node overlay tdutils tdactor adnl tl_api dht -# catchain validatorsession) - add_executable(test-catchain test/test-catchain.cpp) target_link_libraries(test-catchain overlay tdutils tdactor adnl adnltest rldp tl_api dht catchain ) -#add_executable(test-validator-session test/test-validator-session.cpp) -#target_link_libraries(test-validator-session overlay tdutils tdactor adnl tl_api dht -# catchain validatorsession) add_executable(test-ton-collator test/test-ton-collator.cpp) target_link_libraries(test-ton-collator overlay tdutils tdactor adnl tl_api dht catchain validatorsession validator-disk ton_validator validator-disk ) -#add_executable(test-validator test/test-validator.cpp) -#target_link_libraries(test-validator overlay tdutils tdactor adnl tl_api dht -# rldp catchain validatorsession ton-node validator ton_validator validator memprof ${JEMALLOC_LIBRARIES}) -#add_executable(test-ext-server test/test-ext-server.cpp) 
-#target_link_libraries(test-ext-server tdutils tdactor adnl tl_api dht ) -#add_executable(test-ext-client test/test-ext-client.cpp) -#target_link_libraries(test-ext-client tdutils tdactor adnl tl_api tl-lite-utils) add_executable(test-http test/test-http.cpp) target_link_libraries(test-http PRIVATE tonhttp) @@ -574,13 +576,53 @@ add_test(test-tdutils test-tdutils) add_test(test-tonlib-offline test-tonlib-offline) #END tonlib +# FunC tests +if (NOT NIX) + if (MSVC) + set(PYTHON_VER "python") + else() + set(PYTHON_VER "python3") + endif() + add_test( + NAME test-func + COMMAND ${PYTHON_VER} run_tests.py tests/ + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/crypto/func/auto-tests) + if (WIN32) + set_property(TEST test-func PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func.exe" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift.exe" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + else() + set_property(TEST test-func PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + endif() + + add_test( + NAME test-func-legacy + COMMAND ${PYTHON_VER} legacy_tester.py + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/crypto/func/auto-tests) + if (WIN32) + set_property(TEST test-func-legacy PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func.exe" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift.exe" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + else() + set_property(TEST test-func-legacy PROPERTY ENVIRONMENT + "FUNC_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/func" + "FIFT_EXECUTABLE=${CMAKE_CURRENT_BINARY_DIR}/crypto/fift" + "FIFTPATH=${CMAKE_CURRENT_SOURCE_DIR}/crypto/fift/lib/") + endif() +endif() + #BEGIN internal if (NOT TON_ONLY_TONLIB) add_test(test-adnl test-adnl) add_test(test-dht test-dht) add_test(test-rldp test-rldp) add_test(test-rldp2 test-rldp2) -#add_test(test-validator-session-state test-validator-session-state) +add_test(test-validator-session-state test-validator-session-state) add_test(test-catchain test-catchain) add_test(test-fec test-fec) diff --git a/Changelog.md b/Changelog.md index 0f0cd833..9a98e44c 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,3 +1,55 @@ +## 2024.01 Update + +1. Fixes in how gas from transactions on special accounts is accounted for in the block limit. Previously, gas was counted as usual, so to conduct elections (which cost >30m gas) the masterchain block limit was set to 37m gas. To lower the limit for safety reasons, it is proposed to count gas on special accounts separately. Besides, `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. The new behavior is activated through setting `version >= 5` in `ConfigParam 8;`. + * Besides, the config update temporarily increases the gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` to `special_gas_limit`, see [details](https://t.me/tonstatus/88). +2. Improvements in LS behavior + * Improved detection of the state with all shards applied to decrease the rate of `Block is not applied` errors + * Better error logs: `block not in db` and `block is not applied` separation + * Fix error in proof generation for blocks after merge + * Fix most of `block is not applied` issues related to sending a too recent block in Proofs + * LS now checks external messages up to `accept_message` (`set_gas`). +3. 
Improvements in DHT work and storage, CellDb, config.json amendment, peer misbehavior detection, validator session stats collection, and the emulator. +4. Changes in CTOS and XLOAD behavior, activated through setting `version >= 5` in `ConfigParam 8;`: + * Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception. + * Loading a library consumes gas for the cell load only once (for the library cell), not twice (both for the library cell and the cell in the library). + * `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed. + +Besides the work of the Core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection) and @akifoq (CTOS behavior and gas limit scheme for special accounts). + +## 2023.12 Update + +1. Optimized message queue handling: queue cleaning speed no longer depends on the total queue size + * Cleaning delivered messages using lt augmentation instead of random search / consecutive walk + * Keeping the root cell of a queue message in memory until it is outdated (caching) +2. Changes to block collation/validation limits +3. Stop accepting new external messages if the message queue is overloaded +4. Introducing conditions for shard split/merge based on queue size + +Read [more](https://blog.ton.org/technical-report-december-5-inscriptions-launch-on-ton) about that update. + +## 2023.11 Update + +1. New TVM functionality (disabled by default) +2. A series of emulator improvements: libraries support, higher max stack size, etc. +3. A series of tonlib and tonlib-cli improvements: wallet-v4 support, getconfig, showtransactions, etc. +4. Changes to public libraries: now a contract cannot publish more than 256 libraries (config parameter) and contracts cannot be deployed with public libraries in initstate (instead, contracts need to explicitly publish all libraries) +5. Changes to storage due payment: now due payment is collected in the Storage Phase; however, for bounceable messages the fee amount cannot exceed the account balance prior to the message. + + +Besides the work of the core team, this update is based on the efforts of @aleksej-paschenko (emulator improvements), @akifoq (security improvements), the Trail of Bits auditor, as well as all participants of the [TEP-88 discussion](https://github.com/ton-blockchain/TEPs/pull/88). + +## 2023.10 Update +1. A series of additional security checks in the node: special cells in the action list, init state in external messages, peers data prior to saving to disk. +2. Human-readable timestamps in the explorer + +Besides the work of the core team, this update is based on the efforts of @akifoq and @mr-tron. + +## 2023.06 Update +1. (disabled by default) New deflation mechanisms: partial fee burning and blackhole address +2. Storage-contract improvement + +Besides the work of the core team, this update is based on the efforts of @DearJohnDoe from Tonbyte (Storage-contract improvement). + ## 2023.05 Update 1. Archive manager optimization 2. 
A series of catchain (basic consensus protocol) security improvements diff --git a/docker/Dockerfile b/Dockerfile similarity index 65% rename from docker/Dockerfile rename to Dockerfile index 595b4d87..e3cd7b26 100644 --- a/docker/Dockerfile +++ b/Dockerfile @@ -1,26 +1,29 @@ -FROM ubuntu:20.04 as builder +FROM ubuntu:22.04 as builder RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang-6.0 openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev pkg-config && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y build-essential cmake clang openssl libssl-dev zlib1g-dev gperf wget git ninja-build libsecp256k1-dev libsodium-dev libmicrohttpd-dev pkg-config autoconf automake libtool && \ rm -rf /var/lib/apt/lists/* -ENV CC clang-6.0 -ENV CXX clang++-6.0 +ENV CC clang +ENV CXX clang++ ENV CCACHE_DISABLE 1 + WORKDIR / -RUN git clone --recursive https://github.com/ton-blockchain/ton +RUN mkdir ton WORKDIR /ton +COPY ./ ./ + RUN mkdir build && \ cd build && \ - cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= -DCMAKE_CXX_FLAGS="-mavx2" .. && \ + cmake -GNinja -DCMAKE_BUILD_TYPE=Release -DPORTABLE=1 -DTON_ARCH= .. && \ ninja storage-daemon storage-daemon-cli tonlibjson fift func validator-engine validator-engine-console generate-random-id dht-server lite-client -FROM ubuntu:20.04 +FROM ubuntu:22.04 RUN apt-get update && \ - apt-get install -y openssl wget libatomic1 && \ - rm -rf /var/lib/apt/lists/* + apt-get install -y wget libatomic1 openssl libsecp256k1-dev libsodium-dev libmicrohttpd-dev && \ + rm -rf /var/lib/apt/lists/* RUN mkdir -p /var/ton-work/db && \ - mkdir -p /var/ton-work/db/static + mkdir -p /var/ton-work/db/static COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon /usr/local/bin/ COPY --from=builder /ton/build/storage/storage-daemon/storage-daemon-cli /usr/local/bin/ @@ -30,7 +33,7 @@ COPY --from=builder /ton/build/validator-engine-console/validator-engine-console COPY --from=builder /ton/build/utils/generate-random-id /usr/local/bin/ WORKDIR /var/ton-work/db -COPY init.sh control.template ./ +COPY ./docker/init.sh ./docker/control.template ./ RUN chmod +x init.sh -ENTRYPOINT ["/var/ton-work/db/init.sh"] +ENTRYPOINT ["/var/ton-work/db/init.sh"] \ No newline at end of file diff --git a/README.md b/README.md index 6bc3b1f5..653f2f83 100644 --- a/README.md +++ b/README.md @@ -37,13 +37,13 @@ Main TON monorepo, which includes the code of the node/validator, lite-client, t ## The Open Network __The Open Network (TON)__ is a fast, secure, scalable blockchain focused on handling _millions of transactions per second_ (TPS) with the goal of reaching hundreds of millions of blockchain users. 
-- To learn more about different aspects of TON blockchain and its underlying ecosystem check [documentation](ton.org/docs) +- To learn more about different aspects of TON blockchain and its underlying ecosystem check [documentation](https://ton.org/docs) - To run node, validator or lite-server check [Participate section](https://ton.org/docs/participate/nodes/run-node) - To develop decentralised apps check [Tutorials](https://ton.org/docs/develop/smart-contracts/), [FunC docs](https://ton.org/docs/develop/func/overview) and [DApp tutorials](https://ton.org/docs/develop/dapps/) - To work on TON check [wallets](https://ton.app/wallets), [explorers](https://ton.app/explorers), [DEXes](https://ton.app/dex) and [utilities](https://ton.app/utilities) - To interact with TON check [APIs](https://ton.org/docs/develop/dapps/apis/) -## Updates flow: +## Updates flow * **master branch** - mainnet is running on this stable branch. @@ -61,8 +61,91 @@ Usually, the response to your pull request will indicate which section it falls * Thou shall not merge your own PRs, at least one person should review the PR and merge it (4-eyes rule) * Thou shall make sure that workflows are cleanly completed for your PR before considering merge -## Workflows responsibility -If a CI workflow fails not because of your changes but workflow issues, try to fix it yourself or contact one of the persons listed below via Telegram messenger: +## Build TON blockchain -* **C/C++ CI (ccpp-linux.yml)**: TBD -* **C/C++ CI Win64 Compile (ccpp-win64.yml)**: TBD +### Ubuntu 20.4, 22.04 (x86-64, aarch64) +Install additional system libraries +```bash + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all +``` +Compile TON binaries +```bash + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh +``` + +### MacOS 11, 12 (x86-64, aarch64) +```bash + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh +``` + +### Windows 10, 11, Server (x86-64) +You need to install `MS Visual Studio 2022` first. +Go to https://www.visualstudio.com/downloads/ and download `MS Visual Studio 2022 Community`. + +Launch installer and select `Desktop development with C++`. +After installation, also make sure that `cmake` is globally available by adding +`C:\Program Files\Microsoft Visual Studio\2022\Community\Common7\IDE\CommonExtensions\Microsoft\CMake\CMake\bin` to the system `PATH` (adjust the path per your needs). + +Open an elevated (Run as Administrator) `x86-64 Native Tools Command Prompt for VS 2022`, go to the root folder and execute: +```bash + copy assembly\native\build-windows.bat . 
+ build-windows.bat +``` + +### Building TON to WebAssembly +Install additional system libraries on Ubuntu +```bash + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + + wget https://apt.llvm.org/llvm.sh + chmod +x llvm.sh + sudo ./llvm.sh 16 all +``` +Compile TON binaries with emscripten +```bash + cd assembly/wasm + chmod +x fift-func-wasm-build-ubuntu.sh + ./fift-func-wasm-build-ubuntu.sh +``` + +### Building TON tonlib library for Android (arm64-v8a, armeabi-v7a, x86, x86-64) +Install additional system libraries on Ubuntu +```bash + sudo apt-get update + sudo apt-get install -y build-essential git cmake ninja-build automake libtool texinfo autoconf libgflags-dev \ + zlib1g-dev libssl-dev libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev \ + libtool autoconf libsodium-dev libsecp256k1-dev +``` +Compile TON tonlib library +```bash + cp assembly/android/build-android-tonlib.sh . + chmod +x build-android-tonlib.sh + ./build-android-tonlib.sh +``` + +### Build TON portable binaries with Nix package manager +You need to install Nix first. +```bash + sh <(curl -L https://nixos.org/nix/install) --daemon +``` +Then compile TON with Nix by executing below command from the root folder: +```bash + cp -r assembly/nix/* . + export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + nix-build linux-x86-64-static.nix +``` +More examples for other platforms can be found under `assembly/nix`. + +## Running tests + +Tests are executed by running `ctest` in the build directory. See `doc/Tests.md` for more information. \ No newline at end of file diff --git a/adnl/CMakeLists.txt b/adnl/CMakeLists.txt index b287cba0..217a9624 100644 --- a/adnl/CMakeLists.txt +++ b/adnl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #BEGIN internal if (NOT TON_ONLY_TONLIB) diff --git a/assembly/android/build-android-tonlib.sh b/assembly/android/build-android-tonlib.sh new file mode 100644 index 00000000..e470f602 --- /dev/null +++ b/assembly/android/build-android-tonlib.sh @@ -0,0 +1,55 @@ +with_artifacts=false + +while getopts 'a' flag; do + case "${flag}" in + a) with_artifacts=true ;; + *) break + ;; + esac +done + +if [ ! -d android-ndk-r25b ]; then + rm android-ndk-r25b-linux.zip + wget -q https://dl.google.com/android/repository/android-ndk-r25b-linux.zip + unzip -q android-ndk-r25b-linux.zip + test $? -eq 0 || { echo "Can't unzip android-ndk-r25b-linux.zip"; exit 1; } + echo Android NDK extracted +else + echo Using extracted Android NDK +fi + +export JAVA_AWT_LIBRARY=NotNeeded +export JAVA_JVM_LIBRARY=NotNeeded +export JAVA_INCLUDE_PATH=${JAVA_HOME}/include +export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include +export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux + +export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b +export NDK_PLATFORM="android-21" +export ANDROID_PLATFORM="android-21" +export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto + +rm -rf example/android/src/drinkless/org/ton/TonApi.java +cd example/android/ + +rm CMakeCache.txt .ninja_* +cmake -GNinja -DTON_ONLY_TONLIB=ON . + +test $? -eq 0 || { echo "Can't configure TON"; exit 1; } + +ninja prepare_cross_compiling + +test $? -eq 0 || { echo "Can't compile prepare_cross_compiling"; exit 1; } + +rm CMakeCache.txt .ninja_* + +. ./build-all.sh + +find . 
-name "*.debug" -type f -delete + +if [ "$with_artifacts" = true ]; then + cd ../.. + mkdir -p artifacts/tonlib-android-jni + cp example/android/src/drinkless/org/ton/TonApi.java artifacts/tonlib-android-jni/ + cp -R example/android/libs/* artifacts/tonlib-android-jni/ +fi diff --git a/assembly/cicd/jenkins/test-builds.groovy b/assembly/cicd/jenkins/test-builds.groovy new file mode 100644 index 00000000..960ac8db --- /dev/null +++ b/assembly/cicd/jenkins/test-builds.groovy @@ -0,0 +1,236 @@ +pipeline { + agent none + stages { + stage('Run Builds') { + parallel { + stage('Ubuntu 20.04 x86-64 (shared)') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-x86_64-linux-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86_64-linux-shared.zip' + } + } + } + stage('Ubuntu 20.04 x86-64 (portable)') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-linux-x86-64-nix.sh . + chmod +x build-linux-x86-64-nix.sh + ./build-linux-x86-64-nix.sh -t + ''' + sh ''' + cd artifacts + zip -9r ton-x86-64-linux-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-linux-portable.zip' + } + } + } + stage('Ubuntu 20.04 aarch64 (shared)') { + agent { + label 'Ubuntu_arm64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-ubuntu-shared.sh . + chmod +x build-ubuntu-shared.sh + ./build-ubuntu-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-linux-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-linux-shared.zip' + } + } + } + stage('Ubuntu 20.04 aarch64 (portable)') { + agent { + label 'Ubuntu_arm64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-linux-arm64-nix.sh . + chmod +x build-linux-arm64-nix.sh + ./build-linux-arm64-nix.sh -t + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-linux-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-linux-portable.zip' + } + } + } + stage('macOS 12.7 x86-64 (shared)') { + agent { + label 'macOS_12.7_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-x86-64-macos-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-macos-shared.zip' + } + } + } + stage('macOS 12.7 x86-64 (portable)') { + agent { + label 'macOS_12.7_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-macos-nix.sh . + chmod +x build-macos-nix.sh + ./build-macos-nix.sh -t + ''' + sh ''' + cd artifacts + zip -9r ton-x86-64-macos-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-macos-portable.zip' + } + } + } + stage('macOS 12.6 aarch64 (shared)') { + agent { + label 'macOS_12.6-arm64-m1' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-macos-shared.sh . 
+ chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-macos-m1-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-macos-m1-shared.zip' + } + } + } + stage('macOS 12.6 aarch64 (portable)') { + agent { + label 'macOS_12.6-arm64-m1' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/nix/build-macos-nix.sh . + chmod +x build-macos-nix.sh + ./build-macos-nix.sh -t + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-macos-portable ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-macos-portable.zip' + } + } + } + stage('macOS 13.2 aarch64 (shared)') { + agent { + label 'macOS_13.2-arm64-m2' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/native/build-macos-shared.sh . + chmod +x build-macos-shared.sh + ./build-macos-shared.sh -t -a + ''' + sh ''' + cd artifacts + zip -9r ton-arm64-macos-m2-shared ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-arm64-macos-m2-shared.zip' + } + } + } + stage('Windows Server 2022 x86-64') { + agent { + label 'Windows_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + bat ''' + copy assembly\\native\\build-windows.bat . + build-windows.bat + ''' + bat ''' + cd artifacts + zip -9r ton-x86-64-windows ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-x86-64-windows.zip' + } + } + } + stage('Android Tonlib') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cp assembly/android/build-android-tonlib.sh . + chmod +x build-android-tonlib.sh + ./build-android-tonlib.sh -a + ''' + sh ''' + cd artifacts/tonlib-android-jni + zip -9r ton-android-tonlib ./* + ''' + archiveArtifacts artifacts: 'artifacts/tonlib-android-jni/ton-android-tonlib.zip' + } + } + } + stage('WASM fift func emulator') { + agent { + label 'Ubuntu_x86-64' + } + steps { + timeout(time: 90, unit: 'MINUTES') { + sh ''' + cd assembly/wasm + chmod +x fift-func-wasm-build-ubuntu.sh + ./fift-func-wasm-build-ubuntu.sh -a + ''' + sh ''' + cd artifacts + zip -9r ton-wasm-binaries ./* + ''' + archiveArtifacts artifacts: 'artifacts/ton-wasm-binaries.zip' + } + } + } + } + } + } +} diff --git a/assembly/native/build-macos-portable.sh b/assembly/native/build-macos-portable.sh new file mode 100644 index 00000000..a4187d4c --- /dev/null +++ b/assembly/native/build-macos-portable.sh @@ -0,0 +1,207 @@ +#/bin/bash + +with_tests=false +with_artifacts=false +OSX_TARGET=10.15 + + +while getopts 'tao:' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + o) OSX_TARGET=${OPTARG} ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export NONINTERACTIVE=1 +brew install ninja pkg-config automake libtool autoconf +brew install llvm@16 + + +if [ -f /opt/homebrew/opt/llvm@16/bin/clang ]; then + export CC=/opt/homebrew/opt/llvm@16/bin/clang + export CXX=/opt/homebrew/opt/llvm@16/bin/clang++ +else + export CC=/usr/local/opt/llvm@16/bin/clang + export CXX=/usr/local/opt/llvm@16/bin/clang++ +fi +export CCACHE_DISABLE=1 + +if [ ! -d "secp256k1" ]; then +git clone https://github.com/bitcoin-core/secp256k1.git +cd secp256k1 +secp256k1Path=`pwd` +git checkout v0.3.2 +./autogen.sh +./configure --enable-module-recovery --enable-static --disable-tests --disable-benchmark --with-pic +make -j12 +test $? -eq 0 || { echo "Can't compile secp256k1"; exit 1; } +cd .. 
+else + secp256k1Path=$(pwd)/secp256k1 + echo "Using compiled secp256k1" +fi + +if [ ! -d "libsodium" ]; then + export LIBSODIUM_FULL_BUILD=1 + git clone https://github.com/jedisct1/libsodium.git + cd libsodium + sodiumPath=`pwd` + git checkout 1.0.18 + ./autogen.sh + ./configure --with-pic --enable-static + make -j12 + test $? -eq 0 || { echo "Can't compile libsodium"; exit 1; } + cd .. +else + sodiumPath=$(pwd)/libsodium + echo "Using compiled libsodium" +fi + +if [ ! -d "openssl_3" ]; then + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + opensslPath=`pwd` + git checkout openssl-3.1.4 + ./config -static + make build_libs -j12 + test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; } + cd .. +else + opensslPath=$(pwd)/openssl_3 + echo "Using compiled openssl_3" +fi + +if [ ! -d "zlib" ]; then + git clone https://github.com/madler/zlib.git + cd zlib + zlibPath=`pwd` + ./configure --static + make -j12 + test $? -eq 0 || { echo "Can't compile zlib"; exit 1; } + cd .. +else + zlibPath=$(pwd)/zlib + echo "Using compiled zlib" +fi + +if [ ! -d "libmicrohttpd" ]; then + git clone https://git.gnunet.org/libmicrohttpd.git + cd libmicrohttpd + libmicrohttpdPath=`pwd` + ./autogen.sh + ./configure --enable-static --disable-tests --disable-benchmark --disable-shared --disable-https --with-pic + make -j12 + test $? -eq 0 || { echo "Can't compile libmicrohttpd"; exit 1; } + cd .. +else + libmicrohttpdPath=$(pwd)/libmicrohttpd + echo "Using compiled libmicrohttpd" +fi + +cmake -GNinja .. \ +-DPORTABLE=1 \ +-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=$OSX_TARGET \ +-DCMAKE_CXX_FLAGS="-stdlib=libc++" \ +-DCMAKE_BUILD_TYPE=Release \ +-DOPENSSL_FOUND=1 \ +-DOPENSSL_INCLUDE_DIR=$opensslPath/include \ +-DOPENSSL_CRYPTO_LIBRARY=$opensslPath/libcrypto.a \ +-DZLIB_FOUND=1 \ +-DZLIB_INCLUDE_DIR=$zlibPath \ +-DZLIB_LIBRARIES=$zlibPath/libz.a \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$secp256k1Path/include \ +-DSECP256K1_LIBRARY=$secp256k1Path/.libs/libsecp256k1.a \ +-DSODIUM_FOUND=1 \ +-DSODIUM_INCLUDE_DIR=$sodiumPath/src/libsodium/include \ +-DSODIUM_LIBRARY_RELEASE=$sodiumPath/src/libsodium/.libs/libsodium.a \ +-DMHD_FOUND=1 \ +-DMHD_INCLUDE_DIR=$libmicrohttpdPath/src/include \ +-DMHD_LIBRARY=$libmicrohttpdPath/src/microhttpd/.libs/libmicrohttpd.a + + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \ + test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \ + test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + test $? 
-eq 0 || { echo "Can't compile ton"; exit 1; } +fi + +strip storage/storage-daemon/storage-daemon +strip storage/storage-daemon/storage-daemon-cli +strip blockchain-explorer/blockchain-explorer +strip crypto/fift +strip crypto/func +strip crypto/create-state +strip crypto/tlbc +strip validator-engine-console/validator-engine-console +strip tonlib/tonlib-cli +strip http/http-proxy +strip rldp-http-proxy/rldp-http-proxy +strip dht-server/dht-server +strip lite-client/lite-client +strip validator-engine/validator-engine +strip utils/generate-random-id +strip utils/json2tlo +strip adnl/adnl-proxy + +cd .. + +if [ "$with_artifacts" = true ]; then + echo Creating artifacts... + rm -rf artifacts + mkdir artifacts + cp crypto/fift/lib artifacts/ + cp -R crypto/smartcont/ artifacts/ + cp build/storage/storage-daemon/storage-daemon artifacts/ + cp build/storage/storage-daemon/storage-daemon-cli artifacts/ + cp build/blockchain-explorer/blockchain-explorer artifacts/ + cp build/crypto/fift artifacts/ + cp build/crypto/func artifacts/ + cp build/crypto/create-state artifacts/ + cp build/crypto/tlbc artifacts/ + cp build/validator-engine-console/validator-engine-console artifacts/ + cp build/tonlib/tonlib-cli artifacts/ + cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib + cp build/http/http-proxy artifacts/ + cp build/rldp-http-proxy/rldp-http-proxy artifacts/ + cp build/dht-server/dht-server artifacts/ + cp build/lite-client/lite-client artifacts/ + cp build/validator-engine/validator-engine artifacts/ + cp build/utils/generate-random-id artifacts/ + cp build/utils/json2tlo artifacts/ + cp build/adnl/adnl-proxy artifacts/ + cp build/emulator/libemulator.dylib artifacts/ + chmod +x artifacts/* + rsync -r crypto/smartcont artifacts/ + rsync -r crypto/fift/lib artifacts/ +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors" + ctest --output-on-failure +fi diff --git a/assembly/native/build-macos-shared.sh b/assembly/native/build-macos-shared.sh new file mode 100644 index 00000000..7b4f90ee --- /dev/null +++ b/assembly/native/build-macos-shared.sh @@ -0,0 +1,136 @@ +#/bin/bash + +with_tests=false +with_artifacts=false +OSX_TARGET=10.15 + + +while getopts 'tao:' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + o) OSX_TARGET=${OPTARG} ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export NONINTERACTIVE=1 +brew install ninja libsodium libmicrohttpd pkg-config automake libtool autoconf gnutls +brew install llvm@16 + +if [ -f /opt/homebrew/opt/llvm@16/bin/clang ]; then + export CC=/opt/homebrew/opt/llvm@16/bin/clang + export CXX=/opt/homebrew/opt/llvm@16/bin/clang++ +else + export CC=/usr/local/opt/llvm@16/bin/clang + export CXX=/usr/local/opt/llvm@16/bin/clang++ +fi +export CCACHE_DISABLE=1 + +if [ ! -d "secp256k1" ]; then + git clone https://github.com/bitcoin-core/secp256k1.git + cd secp256k1 + secp256k1Path=`pwd` + git checkout v0.3.2 + ./autogen.sh + ./configure --enable-module-recovery --enable-static --disable-tests --disable-benchmark + make -j12 + test $? -eq 0 || { echo "Can't compile secp256k1"; exit 1; } + cd .. +else + secp256k1Path=$(pwd)/secp256k1 + echo "Using compiled secp256k1" +fi + +brew unlink openssl@1.1 +brew install openssl@3 +brew unlink openssl@3 && brew link --overwrite openssl@3 + +cmake -GNinja -DCMAKE_BUILD_TYPE=Release .. 
\ +-DCMAKE_CXX_FLAGS="-stdlib=libc++" \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$secp256k1Path/include \ +-DSECP256K1_LIBRARY=$secp256k1Path/.libs/libsecp256k1.a + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator \ + test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont \ + test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp \ + test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else + ninja storage-daemon storage-daemon-cli blockchain-explorer \ + tonlib tonlibjson tonlib-cli validator-engine func fift \ + lite-client pow-miner validator-engine-console generate-random-id json2tlo dht-server \ + http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork tlbc emulator + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +fi + + +strip storage/storage-daemon/storage-daemon +strip storage/storage-daemon/storage-daemon-cli +strip blockchain-explorer/blockchain-explorer +strip crypto/fift +strip crypto/func +strip crypto/create-state +strip crypto/tlbc +strip validator-engine-console/validator-engine-console +strip tonlib/tonlib-cli +strip http/http-proxy +strip rldp-http-proxy/rldp-http-proxy +strip dht-server/dht-server +strip lite-client/lite-client +strip validator-engine/validator-engine +strip utils/generate-random-id +strip utils/json2tlo +strip adnl/adnl-proxy + +cd .. + +if [ "$with_artifacts" = true ]; then + echo Creating artifacts... 
+ rm -rf artifacts + mkdir artifacts + cp build/storage/storage-daemon/storage-daemon artifacts/ + cp build/storage/storage-daemon/storage-daemon-cli artifacts/ + cp build/blockchain-explorer/blockchain-explorer artifacts/ + cp build/crypto/fift artifacts/ + cp build/crypto/func artifacts/ + cp build/crypto/create-state artifacts/ + cp build/crypto/tlbc artifacts/ + cp build/validator-engine-console/validator-engine-console artifacts/ + cp build/tonlib/tonlib-cli artifacts/ + cp build/tonlib/libtonlibjson.0.5.dylib artifacts/libtonlibjson.dylib + cp build/http/http-proxy artifacts/ + cp build/rldp-http-proxy/rldp-http-proxy artifacts/ + cp build/dht-server/dht-server artifacts/ + cp build/lite-client/lite-client artifacts/ + cp build/validator-engine/validator-engine artifacts/ + cp build/utils/generate-random-id artifacts/ + cp build/utils/json2tlo artifacts/ + cp build/adnl/adnl-proxy artifacts/ + cp build/emulator/libemulator.dylib artifacts/ + chmod +x artifacts/* + rsync -r crypto/smartcont artifacts/ + rsync -r crypto/fift/lib artifacts/ +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors" + ctest --output-on-failure --timeout 1800 +fi diff --git a/assembly/native/build-ubuntu-portable.sh b/assembly/native/build-ubuntu-portable.sh new file mode 100644 index 00000000..81dbe710 --- /dev/null +++ b/assembly/native/build-ubuntu-portable.sh @@ -0,0 +1,198 @@ +#/bin/bash + +#sudo apt-get update +#sudo apt-get install -y build-essential git cmake ninja-build automake libtool texinfo autoconf + +with_tests=false +with_artifacts=false + + +while getopts 'ta' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export CC=$(which clang-16) +export CXX=$(which clang++-16) +export CCACHE_DISABLE=1 + + +if [ ! -d "secp256k1" ]; then +git clone https://github.com/bitcoin-core/secp256k1.git +cd secp256k1 +secp256k1Path=`pwd` +git checkout v0.3.2 +./autogen.sh +./configure --enable-module-recovery --enable-static --disable-tests --disable-benchmark --with-pic +make -j12 +test $? -eq 0 || { echo "Can't compile secp256k1"; exit 1; } +cd .. +# ./.libs/libsecp256k1.a +# ./include +else + secp256k1Path=$(pwd)/secp256k1 + echo "Using compiled secp256k1" +fi + +if [ ! -d "libsodium" ]; then + export LIBSODIUM_FULL_BUILD=1 + git clone https://github.com/jedisct1/libsodium.git + cd libsodium + sodiumPath=`pwd` + git checkout 1.0.18 + ./autogen.sh + ./configure --with-pic --enable-static + make -j12 + test $? -eq 0 || { echo "Can't compile libsodium"; exit 1; } + cd .. +else + sodiumPath=$(pwd)/libsodium + echo "Using compiled libsodium" +fi + +if [ ! -d "openssl_3" ]; then + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + opensslPath=`pwd` + git checkout openssl-3.1.4 + ./config -static + make build_libs -j12 + test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; } + cd .. +else + opensslPath=$(pwd)/openssl_3 + echo "Using compiled openssl_3" +fi + +if [ ! -d "zlib" ]; then + git clone https://github.com/madler/zlib.git + cd zlib + zlibPath=`pwd` + ./configure --static + make -j12 + test $? -eq 0 || { echo "Can't compile zlib"; exit 1; } + cd .. +else + zlibPath=$(pwd)/zlib + echo "Using compiled zlib" +fi + +if [ ! 
-d "libmicrohttpd" ]; then + git clone https://git.gnunet.org/libmicrohttpd.git + cd libmicrohttpd + libmicrohttpdPath=`pwd` + ./autogen.sh + ./configure --enable-static --disable-tests --disable-benchmark --disable-shared --disable-https --with-pic + make -j12 + test $? -eq 0 || { echo "Can't compile libmicrohttpd"; exit 1; } + cd .. +else + libmicrohttpdPath=$(pwd)/libmicrohttpd + echo "Using compiled libmicrohttpd" +fi + +cmake -GNinja .. \ +-DPORTABLE=1 \ +-DCMAKE_BUILD_TYPE=Release \ +-DOPENSSL_FOUND=1 \ +-DOPENSSL_INCLUDE_DIR=$opensslPath/include \ +-DOPENSSL_CRYPTO_LIBRARY=$opensslPath/libcrypto.a \ +-DZLIB_FOUND=1 \ +-DZLIB_INCLUDE_DIR=$zlibPath \ +-DZLIB_LIBRARIES=$zlibPath/libz.a \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$secp256k1Path/include \ +-DSECP256K1_LIBRARY=$secp256k1Path/.libs/libsecp256k1.a \ +-DSODIUM_FOUND=1 \ +-DSODIUM_INCLUDE_DIR=$sodiumPath/src/libsodium/include \ +-DSODIUM_LIBRARY_RELEASE=$sodiumPath/src/libsodium/.libs/libsodium.a \ +-DMHD_FOUND=1 \ +-DMHD_INCLUDE_DIR=$libmicrohttpdPath/src/include \ +-DMHD_LIBRARY=$libmicrohttpdPath/src/microhttpd/.libs/libmicrohttpd.a + + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \ + test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \ + test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \ + test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +fi + +strip -g storage/storage-daemon/storage-daemon \ + storage/storage-daemon/storage-daemon-cli \ + blockchain-explorer/blockchain-explorer \ + crypto/fift \ + crypto/tlbc \ + crypto/func \ + crypto/create-state \ + validator-engine-console/validator-engine-console \ + tonlib/tonlib-cli \ + tonlib/libtonlibjson.so.0.5 \ + http/http-proxy \ + rldp-http-proxy/rldp-http-proxy \ + dht-server/dht-server \ + lite-client/lite-client \ + validator-engine/validator-engine \ + utils/generate-random-id \ + utils/json2tlo \ + adnl/adnl-proxy \ + emulator/libemulator.* + +test $? -eq 0 || { echo "Can't strip final binaries"; exit 1; } + +# simple binaries' test +./storage/storage-daemon/storage-daemon -V || exit 1 +./validator-engine/validator-engine -V || exit 1 +./lite-client/lite-client -V || exit 1 +./crypto/fift -V || exit 1 + +cd .. 
+ +if [ "$with_artifacts" = true ]; then + rm -rf artifacts + mkdir artifacts + cp crypto/fift/lib artifacts/ + cp -R crypto/smartcont/ artifacts/ + mv build/tonlib/libtonlibjson.so.0.5 build/tonlib/libtonlibjson.so + cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli \ + build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/blockchain-explorer/blockchain-explorer \ + build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli \ + build/tonlib/libtonlibjson.so build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy \ + build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine \ + build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.so \ + artifacts + test $? -eq 0 || { echo "Can't copy final binaries"; exit 1; } + chmod +x artifacts/* + cp -R crypto/smartcont artifacts + cp -R crypto/fift/lib artifacts +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors|test-smartcont|test-adnl|test-validator-session-state|test-dht|test-rldp" + ctest --output-on-failure -E "test-adnl" +fi diff --git a/assembly/native/build-ubuntu-shared.sh b/assembly/native/build-ubuntu-shared.sh new file mode 100644 index 00000000..12c819cd --- /dev/null +++ b/assembly/native/build-ubuntu-shared.sh @@ -0,0 +1,122 @@ +#/bin/bash + +#sudo apt-get update +#sudo apt-get install -y build-essential git cmake ninja-build zlib1g-dev libsecp256k1-dev libmicrohttpd-dev libsodium-dev + +with_tests=false +with_artifacts=false + + +while getopts 'ta' flag; do + case "${flag}" in + t) with_tests=true ;; + a) with_artifacts=true ;; + *) break + ;; + esac +done + +if [ ! -d "build" ]; then + mkdir build + cd build +else + cd build + rm -rf .ninja* CMakeCache.txt +fi + +export CC=$(which clang-16) +export CXX=$(which clang++-16) +export CCACHE_DISABLE=1 + +if [ ! -d "openssl_3" ]; then + git clone https://github.com/openssl/openssl openssl_3 + cd openssl_3 + opensslPath=`pwd` + git checkout openssl-3.1.4 + ./config + make build_libs -j12 + test $? -eq 0 || { echo "Can't compile openssl_3"; exit 1; } + cd .. +else + opensslPath=$(pwd)/openssl_3 + echo "Using compiled openssl_3" +fi + +cmake -GNinja .. \ +-DCMAKE_BUILD_TYPE=Release \ +-DOPENSSL_ROOT_DIR=$opensslPath \ +-DOPENSSL_INCLUDE_DIR=$opensslPath/include \ +-DOPENSSL_CRYPTO_LIBRARY=$opensslPath/libcrypto.so + + +test $? -eq 0 || { echo "Can't configure ton"; exit 1; } + +if [ "$with_tests" = true ]; then +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator test-ed25519 test-ed25519-crypto test-bigint \ + test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils \ + test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain \ + test-fec test-tddb test-db test-validator-session-state + test $? -eq 0 || { echo "Can't compile ton"; exit 1; } +else +ninja storage-daemon storage-daemon-cli fift func tonlib tonlibjson tonlib-cli \ + validator-engine lite-client pow-miner validator-engine-console blockchain-explorer \ + generate-random-id json2tlo dht-server http-proxy rldp-http-proxy \ + adnl-proxy create-state emulator + test $? 
-eq 0 || { echo "Can't compile ton"; exit 1; } +fi + +strip -g storage/storage-daemon/storage-daemon \ + storage/storage-daemon/storage-daemon-cli \ + blockchain-explorer/blockchain-explorer \ + crypto/fift \ + crypto/tlbc \ + crypto/func \ + crypto/create-state \ + validator-engine-console/validator-engine-console \ + tonlib/tonlib-cli \ + tonlib/libtonlibjson.so.0.5 \ + http/http-proxy \ + rldp-http-proxy/rldp-http-proxy \ + dht-server/dht-server \ + lite-client/lite-client \ + validator-engine/validator-engine \ + utils/generate-random-id \ + utils/json2tlo \ + adnl/adnl-proxy \ + emulator/libemulator.* + +test $? -eq 0 || { echo "Can't strip final binaries"; exit 1; } + +# simple binaries' test +./storage/storage-daemon/storage-daemon -V || exit 1 +./validator-engine/validator-engine -V || exit 1 +./lite-client/lite-client -V || exit 1 +./crypto/fift -V || exit 1 + +cd .. + +if [ "$with_artifacts" = true ]; then + rm -rf artifacts + mkdir artifacts + mv build/tonlib/libtonlibjson.so.0.5 build/tonlib/libtonlibjson.so + cp build/storage/storage-daemon/storage-daemon build/storage/storage-daemon/storage-daemon-cli \ + build/crypto/fift build/crypto/tlbc build/crypto/func build/crypto/create-state build/blockchain-explorer/blockchain-explorer \ + build/validator-engine-console/validator-engine-console build/tonlib/tonlib-cli \ + build/tonlib/libtonlibjson.so build/http/http-proxy build/rldp-http-proxy/rldp-http-proxy \ + build/dht-server/dht-server build/lite-client/lite-client build/validator-engine/validator-engine \ + build/utils/generate-random-id build/utils/json2tlo build/adnl/adnl-proxy build/emulator/libemulator.so \ + artifacts + test $? -eq 0 || { echo "Can't copy final binaries"; exit 1; } + chmod +x artifacts/* + cp -R crypto/smartcont artifacts + cp -R crypto/fift/lib artifacts +fi + +if [ "$with_tests" = true ]; then + cd build +# ctest --output-on-failure -E "test-catchain|test-actors|test-smartcont|test-adnl|test-validator-session-state|test-dht|test-rldp" + ctest --output-on-failure --timeout 1800 +fi \ No newline at end of file diff --git a/assembly/native/build-windows-github.bat b/assembly/native/build-windows-github.bat new file mode 100644 index 00000000..7cad8c7e --- /dev/null +++ b/assembly/native/build-windows-github.bat @@ -0,0 +1,2 @@ +call "C:\Program Files\Microsoft Visual Studio\2022\%1\VC\Auxiliary\Build\vcvars64.bat" +call build-windows.bat -t \ No newline at end of file diff --git a/assembly/native/build-windows.bat b/assembly/native/build-windows.bat new file mode 100644 index 00000000..2e3f2082 --- /dev/null +++ b/assembly/native/build-windows.bat @@ -0,0 +1,193 @@ +REM execute this script inside elevated (Run as Administrator) console "x64 Native Tools Command Prompt for VS 2022" + +echo off + +echo Installing chocolatey windows package manager... +@"%SystemRoot%\System32\WindowsPowerShell\v1.0\powershell.exe" -NoProfile -InputFormat None -ExecutionPolicy Bypass -Command "iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))" && SET "PATH=%PATH%;%ALLUSERSPROFILE%\chocolatey\bin" +choco -? +IF %errorlevel% NEQ 0 ( + echo Can't install chocolatey + exit /b %errorlevel% +) + +choco feature enable -n allowEmptyChecksums + +echo Installing pkgconfiglite... +choco install -y pkgconfiglite +IF errorlevel 1 ( + echo Can't install pkgconfiglite + exit /b %errorlevel% +) + +echo Installing ninja... 
+choco install -y ninja +IF errorlevel 1 ( + echo Can't install ninja + exit /b %errorlevel% +) + +if not exist "zlib" ( +git clone https://github.com/madler/zlib.git +cd zlib\contrib\vstudio\vc14 +msbuild zlibstat.vcxproj /p:Configuration=ReleaseWithoutAsm /p:platform=x64 -p:PlatformToolset=v143 + +IF errorlevel 1 ( + echo Can't install zlib + exit /b %errorlevel% +) +cd ..\..\..\.. +) else ( +echo Using zlib... +) + +if not exist "secp256k1" ( +git clone https://github.com/libbitcoin/secp256k1.git +cd secp256k1\builds\msvc\vs2017 +msbuild /p:Configuration=StaticRelease -p:PlatformToolset=v143 -p:Platform=x64 +IF errorlevel 1 ( + echo Can't install secp256k1 + exit /b %errorlevel% +) +cd ..\..\..\.. +) else ( +echo Using secp256k1... +) + + +if not exist "libsodium" ( +curl -Lo libsodium-1.0.18-stable-msvc.zip https://download.libsodium.org/libsodium/releases/libsodium-1.0.18-stable-msvc.zip +IF errorlevel 1 ( + echo Can't download libsodium + exit /b %errorlevel% +) +unzip libsodium-1.0.18-stable-msvc.zip +) else ( +echo Using libsodium... +) + +if not exist "openssl-3.1.4" ( +curl -Lo openssl-3.1.4.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/openssl-3.1.4.zip +IF errorlevel 1 ( + echo Can't download OpenSSL + exit /b %errorlevel% +) +unzip -q openssl-3.1.4.zip +) else ( +echo Using openssl... +) + +if not exist "libmicrohttpd-0.9.77-w32-bin" ( +curl -Lo libmicrohttpd-0.9.77-w32-bin.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/libmicrohttpd-0.9.77-w32-bin.zip +IF errorlevel 1 ( + echo Can't download libmicrohttpd + exit /b %errorlevel% +) +unzip -q libmicrohttpd-0.9.77-w32-bin.zip +) else ( +echo Using libmicrohttpd... +) + +if not exist "readline-5.0-1-lib" ( +curl -Lo readline-5.0-1-lib.zip https://github.com/neodiX42/precompiled-openssl-win64/raw/main/readline-5.0-1-lib.zip +IF errorlevel 1 ( + echo Can't download readline + exit /b %errorlevel% +) +unzip -q -d readline-5.0-1-lib readline-5.0-1-lib.zip +) else ( +echo Using readline... +) + + +set root=%cd% +echo %root% +set SODIUM_DIR=%root%\libsodium + +mkdir build +cd build +cmake -GNinja -DCMAKE_BUILD_TYPE=Release ^ +-DPORTABLE=1 ^ +-DSODIUM_USE_STATIC_LIBS=1 ^ +-DSECP256K1_FOUND=1 ^ +-DSECP256K1_INCLUDE_DIR=%root%\secp256k1\include ^ +-DSECP256K1_LIBRARY=%root%\secp256k1\bin\x64\Release\v143\static\secp256k1.lib ^ +-DMHD_FOUND=1 ^ +-DMHD_LIBRARY=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static\libmicrohttpd.lib ^ +-DMHD_INCLUDE_DIR=%root%\libmicrohttpd-0.9.77-w32-bin\x86_64\VS2019\Release-static ^ +-DZLIB_FOUND=1 ^ +-DZLIB_INCLUDE_DIR=%root%\zlib ^ +-DZLIB_LIBRARIES=%root%\zlib\contrib\vstudio\vc14\x64\ZlibStatReleaseWithoutAsm\zlibstat.lib ^ +-DOPENSSL_FOUND=1 ^ +-DOPENSSL_INCLUDE_DIR=%root%/openssl-3.1.4/x64/include ^ +-DOPENSSL_CRYPTO_LIBRARY=%root%/openssl-3.1.4/x64/lib/libcrypto_static.lib ^ +-DCMAKE_CXX_FLAGS="/DTD_WINDOWS=1 /EHsc /bigobj" .. 
+IF errorlevel 1 ( + echo Can't configure TON + exit /b %errorlevel% +) + +IF "%1"=="-t" ( +ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^ +tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^ +json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator ^ +test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net ^ +test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain ^ +test-fec test-tddb test-db test-validator-session-state +IF errorlevel 1 ( + echo Can't compile TON + exit /b %errorlevel% +) +) else ( +ninja storage-daemon storage-daemon-cli blockchain-explorer fift func tonlib tonlibjson ^ +tonlib-cli validator-engine lite-client pow-miner validator-engine-console generate-random-id ^ +json2tlo dht-server http-proxy rldp-http-proxy adnl-proxy create-state create-hardfork emulator +IF errorlevel 1 ( + echo Can't compile TON + exit /b %errorlevel% +) +) + +copy validator-engine\validator-engine.exe test +IF errorlevel 1 ( + echo validator-engine.exe does not exist + exit /b %errorlevel% +) + +IF "%1"=="-t" ( + echo Running tests... +REM ctest -C Release --output-on-failure -E "test-catchain|test-actors|test-validator-session-state" + ctest -C Release --output-on-failure --timeout 1800 + IF errorlevel 1 ( + echo Some tests failed + exit /b %errorlevel% + ) +) + + +echo Creating artifacts... +cd .. +mkdir artifacts +mkdir artifacts\smartcont +mkdir artifacts\lib + +for %%I in (build\storage\storage-daemon\storage-daemon.exe ^ +build\storage\storage-daemon\storage-daemon-cli.exe ^ +build\blockchain-explorer\blockchain-explorer.exe ^ +build\crypto\fift.exe ^ +build\crypto\tlbc.exe ^ +build\crypto\func.exe ^ +build\crypto\create-state.exe ^ +build\validator-engine-console\validator-engine-console.exe ^ +build\tonlib\tonlib-cli.exe ^ +build\tonlib\tonlibjson.dll ^ +build\http\http-proxy.exe ^ +build\rldp-http-proxy\rldp-http-proxy.exe ^ +build\dht-server\dht-server.exe ^ +build\lite-client\lite-client.exe ^ +build\validator-engine\validator-engine.exe ^ +build\utils\generate-random-id.exe ^ +build\utils\json2tlo.exe ^ +build\adnl\adnl-proxy.exe ^ +build\emulator\emulator.dll) do (strip -g %%I & copy %%I artifacts\) +xcopy /e /k /h /i crypto\smartcont artifacts\smartcont +xcopy /e /k /h /i crypto\fift\lib artifacts\lib diff --git a/assembly/nix/build-linux-arm64-nix.sh b/assembly/nix/build-linux-arm64-nix.sh new file mode 100644 index 00000000..08817e72 --- /dev/null +++ b/assembly/nix/build-linux-arm64-nix.sh @@ -0,0 +1,35 @@ +#/bin/bash + +nix-build --version +test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } + +with_tests=false + + +while getopts 't' flag; do + case "${flag}" in + t) with_tests=true ;; + *) break + ;; + esac +done + +cp assembly/nix/linux-arm64* . +cp assembly/nix/microhttpd.nix . +cp assembly/nix/openssl.nix . 
+export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +if [ "$with_tests" = true ]; then + nix-build linux-arm64-static.nix --arg testing true +else + nix-build linux-arm64-static.nix +fi +mkdir artifacts +cp ./result/bin/* artifacts/ +chmod +x artifacts/* +rm -rf result +nix-build linux-arm64-tonlib.nix +cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so +cp ./result/lib/libemulator.so artifacts/ +cp -r crypto/fift/lib artifacts/ +cp -r crypto/smartcont artifacts/ diff --git a/assembly/nix/build-linux-x86-64-nix.sh b/assembly/nix/build-linux-x86-64-nix.sh new file mode 100644 index 00000000..60d31c94 --- /dev/null +++ b/assembly/nix/build-linux-x86-64-nix.sh @@ -0,0 +1,36 @@ +#/bin/bash + +nix-build --version +test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } + +with_tests=false + + +while getopts 't' flag; do + case "${flag}" in + t) with_tests=true ;; + *) break + ;; + esac +done + +cp assembly/nix/linux-x86-64* . +cp assembly/nix/microhttpd.nix . +cp assembly/nix/openssl.nix . +export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +if [ "$with_tests" = true ]; then + nix-build linux-x86-64-static.nix --arg testing true +else + nix-build linux-x86-64-static.nix +fi + +mkdir artifacts +cp ./result/bin/* artifacts/ +chmod +x artifacts/* +rm -rf result +nix-build linux-x86-64-tonlib.nix +cp ./result/lib/libtonlibjson.so.0.5 artifacts/libtonlibjson.so +cp ./result/lib/libemulator.so artifacts/ +cp -r crypto/fift/lib artifacts/ +cp -r crypto/smartcont artifacts/ diff --git a/assembly/nix/build-macos-nix.sh b/assembly/nix/build-macos-nix.sh new file mode 100644 index 00000000..c3664bf0 --- /dev/null +++ b/assembly/nix/build-macos-nix.sh @@ -0,0 +1,33 @@ +#/bin/bash + +nix-build --version +test $? -eq 0 || { echo "Nix is not installed!"; exit 1; } + +with_tests=false + + +while getopts 't' flag; do + case "${flag}" in + t) with_tests=true ;; + *) break + ;; + esac +done + +cp assembly/nix/macos-* . 
+export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +if [ "$with_tests" = true ]; then + nix-build macos-static.nix --arg testing true +else + nix-build macos-static.nix +fi +mkdir artifacts +cp ./result-bin/bin/* artifacts/ +chmod +x artifacts/* +rm -rf result-bin +nix-build macos-tonlib.nix +cp ./result/lib/libtonlibjson.dylib artifacts/ +cp ./result/lib/libemulator.dylib artifacts/ +cp -r crypto/fift/lib artifacts/ +cp -r crypto/smartcont artifacts/ diff --git a/flake.lock b/assembly/nix/flakes/flake.lock similarity index 94% rename from flake.lock rename to assembly/nix/flakes/flake.lock index ca44d4c0..d22f15d3 100644 --- a/flake.lock +++ b/assembly/nix/flakes/flake.lock @@ -36,8 +36,8 @@ }, "nixpkgs-stable": { "locked": { - "lastModified": 1682600000, - "narHash": "sha256-ha4BehR1dh8EnXSoE1m/wyyYVvHI9txjW4w5/oxsW5Y=", + "lastModified": 1698846319, + "narHash": "sha256-4jyW/dqFBVpWFnhl0nvP6EN4lP7/ZqPxYRjl6var0Oc=", "owner": "nixos", "repo": "nixpkgs", "rev": "50fc86b75d2744e1ab3837ef74b53f103a9b55a0", @@ -45,7 +45,7 @@ }, "original": { "owner": "nixos", - "ref": "nixos-22.05", + "ref": "nixos-23.05", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/assembly/nix/flakes/flake.nix similarity index 95% rename from flake.nix rename to assembly/nix/flakes/flake.nix index cb70b5bb..4e993ac5 100644 --- a/flake.nix +++ b/assembly/nix/flakes/flake.nix @@ -1,6 +1,6 @@ { inputs = { - nixpkgs-stable.url = "github:nixos/nixpkgs/nixos-22.05"; + nixpkgs-stable.url = "github:nixos/nixpkgs/nixos-23.05"; nixpkgs-trunk.url = "github:nixos/nixpkgs"; flake-compat = { url = "github:edolstra/flake-compat"; @@ -28,14 +28,14 @@ # then we can skip these manual overrides # and switch between pkgsStatic and pkgsStatic.pkgsMusl for static glibc and musl builds if !staticExternalDeps then [ - openssl_1_1 + openssl zlib libmicrohttpd libsodium secp256k1 ] else [ - (openssl_1_1.override { static = true; }).dev + (openssl.override { static = true; }).dev (zlib.override { shared = false; }).dev ] ++ optionals (!stdenv.isDarwin) [ pkgsStatic.libmicrohttpd.dev pkgsStatic.libsodium.dev secp256k1 ] @@ -50,6 +50,8 @@ ] ++ optionals (staticGlibc || staticMusl) [ "-DCMAKE_LINK_SEARCH_START_STATIC=ON" "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + ] ++ optionals (stdenv.isDarwin) [ + "-DCMAKE_CXX_FLAGS=-stdlib=libc++" "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.7" ]; LDFLAGS = optional staticExternalDeps (concatStringsSep " " [ diff --git a/shell.nix b/assembly/nix/flakes/shell.nix similarity index 100% rename from shell.nix rename to assembly/nix/flakes/shell.nix diff --git a/assembly/nix/linux-arm64-static.nix b/assembly/nix/linux-arm64-static.nix new file mode 100644 index 00000000..5e834269 --- /dev/null +++ b/assembly/nix/linux-arm64-static.nix @@ -0,0 +1,46 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +, testing ? 
false +}: +let + microhttpdmy = (import ./microhttpd.nix) {}; +in +with import microhttpdmy; +stdenv.mkDerivation { + pname = "ton"; + version = "dev-bin"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ + cmake ninja git pkg-config + ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl microhttpdmy pkgsStatic.zlib pkgsStatic.libsodium.dev pkgsStatic.secp256k1 glibc.static + ]; + + makeStatic = true; + doCheck = testing; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DBUILD_SHARED_LIBS=OFF" + "-DCMAKE_LINK_SEARCH_START_STATIC=ON" + "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + "-DMHD_FOUND=1" + "-DMHD_INCLUDE_DIR=${microhttpdmy}/usr/local/include" + "-DMHD_LIBRARY=${microhttpdmy}/usr/local/lib/libmicrohttpd.a" + "-DCMAKE_CTEST_ARGUMENTS=--timeout;1800" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-static" + ]; +} diff --git a/assembly/nix/linux-arm64-tonlib.nix b/assembly/nix/linux-arm64-tonlib.nix new file mode 100644 index 00000000..a753423b --- /dev/null +++ b/assembly/nix/linux-arm64-tonlib.nix @@ -0,0 +1,44 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz +{ + pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +}: +let + microhttpdmy = (import ./microhttpd.nix) {}; +in +with import microhttpdmy; +pkgs.llvmPackages_16.stdenv.mkDerivation { + pname = "ton"; + version = "dev-lib"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ + cmake ninja git pkg-config + ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl microhttpdmy pkgsStatic.zlib pkgsStatic.libsodium.dev pkgsStatic.secp256k1 + ]; + + dontAddStaticConfigureFlags = false; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DMHD_FOUND=1" + "-DMHD_INCLUDE_DIR=${microhttpdmy}/usr/local/include" + "-DMHD_LIBRARY=${microhttpdmy}/usr/local/lib/libmicrohttpd.a" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-fPIC" "-fcommon" + ]; + + ninjaFlags = [ + "tonlibjson" "emulator" + ]; +} diff --git a/assembly/nix/linux-x86-64-static.nix b/assembly/nix/linux-x86-64-static.nix new file mode 100644 index 00000000..5e834269 --- /dev/null +++ b/assembly/nix/linux-x86-64-static.nix @@ -0,0 +1,46 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +, testing ? 
false +}: +let + microhttpdmy = (import ./microhttpd.nix) {}; +in +with import microhttpdmy; +stdenv.mkDerivation { + pname = "ton"; + version = "dev-bin"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ + cmake ninja git pkg-config + ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl microhttpdmy pkgsStatic.zlib pkgsStatic.libsodium.dev pkgsStatic.secp256k1 glibc.static + ]; + + makeStatic = true; + doCheck = testing; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DBUILD_SHARED_LIBS=OFF" + "-DCMAKE_LINK_SEARCH_START_STATIC=ON" + "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + "-DMHD_FOUND=1" + "-DMHD_INCLUDE_DIR=${microhttpdmy}/usr/local/include" + "-DMHD_LIBRARY=${microhttpdmy}/usr/local/lib/libmicrohttpd.a" + "-DCMAKE_CTEST_ARGUMENTS=--timeout;1800" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-static" + ]; +} diff --git a/assembly/nix/linux-x86-64-tonlib.nix b/assembly/nix/linux-x86-64-tonlib.nix new file mode 100644 index 00000000..ac183d2b --- /dev/null +++ b/assembly/nix/linux-x86-64-tonlib.nix @@ -0,0 +1,54 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.11.tar.gz +# copy linux-x86-64-tonlib.nix to git root directory and execute: +# nix-build linux-x86-64-tonlib.nix +{ + pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +}: +let + system = builtins.currentSystem; + + nixos1909 = (import (builtins.fetchTarball { + url = "https://channels.nixos.org/nixos-19.09/nixexprs.tar.xz"; + sha256 = "1vp1h2gkkrckp8dzkqnpcc6xx5lph5d2z46sg2cwzccpr8ay58zy"; + }) { inherit system; }); + glibc227 = nixos1909.glibc // { pname = "glibc"; }; + stdenv227 = let + cc = pkgs.wrapCCWith { + cc = nixos1909.buildPackages.gcc-unwrapped; + libc = glibc227; + bintools = pkgs.binutils.override { libc = glibc227; }; + }; + in (pkgs.overrideCC pkgs.stdenv cc); + +in +stdenv227.mkDerivation { + pname = "ton"; + version = "dev-lib"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ cmake ninja git pkg-config ]; + + buildInputs = with pkgs; + [ + pkgsStatic.openssl pkgsStatic.zlib pkgsStatic.libmicrohttpd.dev pkgsStatic.libsodium.dev pkgsStatic.secp256k1 + ]; + + dontAddStaticConfigureFlags = false; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + ]; + + LDFLAGS = [ + "-static-libgcc" "-static-libstdc++" "-fPIC" + ]; + + ninjaFlags = [ + "tonlibjson" "emulator" + ]; +} diff --git a/assembly/nix/macos-static.nix b/assembly/nix/macos-static.nix new file mode 100644 index 00000000..a3d4667f --- /dev/null +++ b/assembly/nix/macos-static.nix @@ -0,0 +1,66 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? pkgs.stdenv +, testing ? 
false +}: + +pkgs.llvmPackages_14.stdenv.mkDerivation { + pname = "ton"; + version = "dev-bin"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ cmake ninja git pkg-config ]; + + buildInputs = with pkgs; + lib.forEach [ + secp256k1 libsodium.dev libmicrohttpd.dev gmp.dev nettle.dev libtasn1.dev libidn2.dev libunistring.dev gettext (gnutls.override { withP11-kit = false; }).dev + ] + (x: x.overrideAttrs(oldAttrs: rec { configureFlags = (oldAttrs.configureFlags or []) ++ [ "--enable-static" "--disable-shared" "--disable-tests" ]; dontDisableStatic = true; })) + ++ [ + darwin.apple_sdk.frameworks.CoreFoundation + (openssl.override { static = true; }).dev + (zlib.override { shared = false; }).dev + (libiconv.override { enableStatic = true; enableShared = false; }) + ]; + + + dontAddStaticConfigureFlags = true; + makeStatic = true; + doCheck = testing; + + configureFlags = []; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DCMAKE_CROSSCOMPILING=OFF" + "-DCMAKE_LINK_SEARCH_START_STATIC=ON" + "-DCMAKE_LINK_SEARCH_END_STATIC=ON" + "-DBUILD_SHARED_LIBS=OFF" + "-DCMAKE_CXX_FLAGS=-stdlib=libc++" + "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.3" + "-DCMAKE_CTEST_ARGUMENTS=--timeout;1800" + ]; + + LDFLAGS = [ + "-static-libstdc++" + "-framework CoreFoundation" + ]; + + postInstall = '' + moveToOutput bin "$bin" + ''; + + preFixup = '' + for fn in "$bin"/bin/* "$out"/lib/*.dylib; do + echo Fixing libc++ in "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++.1 | cut -d' ' -f1 | xargs)" libc++.1.dylib "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++abi.1 | cut -d' ' -f1 | xargs)" libc++abi.dylib "$fn" + done + ''; + outputs = [ "bin" "out" ]; +} diff --git a/assembly/nix/macos-tonlib.nix b/assembly/nix/macos-tonlib.nix new file mode 100644 index 00000000..c362de4e --- /dev/null +++ b/assembly/nix/macos-tonlib.nix @@ -0,0 +1,55 @@ +# export NIX_PATH=nixpkgs=https://github.com/nixOS/nixpkgs/archive/23.05.tar.gz + +{ pkgs ? import { system = builtins.currentSystem; } +, lib ? pkgs.lib +, stdenv ? 
pkgs.stdenv +}: + +pkgs.llvmPackages_14.stdenv.mkDerivation { + pname = "ton"; + version = "dev-lib"; + + src = ./.; + + nativeBuildInputs = with pkgs; + [ cmake ninja git pkg-config ]; + + buildInputs = with pkgs; + lib.forEach [ + secp256k1 libsodium.dev libmicrohttpd.dev gmp.dev nettle.dev libtasn1.dev libidn2.dev libunistring.dev gettext (gnutls.override { withP11-kit = false; }).dev + ] (x: x.overrideAttrs(oldAttrs: rec { configureFlags = (oldAttrs.configureFlags or []) ++ [ "--enable-static" "--disable-shared" "--disable-tests" ]; dontDisableStatic = true; })) + ++ [ + darwin.apple_sdk.frameworks.CoreFoundation + (openssl.override { static = true; }).dev + (zlib.override { shared = false; }).dev + (libiconv.override { enableStatic = true; enableShared = false; }) + ]; + + dontAddStaticConfigureFlags = true; + + configureFlags = []; + + cmakeFlags = [ + "-DTON_USE_ABSEIL=OFF" + "-DNIX=ON" + "-DCMAKE_CXX_FLAGS=-stdlib=libc++" + "-DCMAKE_OSX_DEPLOYMENT_TARGET:STRING=11.3" + ]; + + LDFLAGS = [ + "-static-libstdc++" + "-framework CoreFoundation" + ]; + + ninjaFlags = [ + "tonlibjson" "emulator" + ]; + + preFixup = '' + for fn in $out/bin/* $out/lib/*.dylib; do + echo Fixing libc++ in "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++.1 | cut -d' ' -f1 | xargs)" libc++.1.dylib "$fn" + install_name_tool -change "$(otool -L "$fn" | grep libc++abi.1 | cut -d' ' -f1 | xargs)" libc++abi.dylib "$fn" + done + ''; +} \ No newline at end of file diff --git a/assembly/nix/microhttpd.nix b/assembly/nix/microhttpd.nix new file mode 100644 index 00000000..4f871425 --- /dev/null +++ b/assembly/nix/microhttpd.nix @@ -0,0 +1,28 @@ +{ pkgs ? import { system = builtins.currentSystem; } +, stdenv ? pkgs.stdenv +, fetchgit ? pkgs.fetchgit +}: + +stdenv.mkDerivation rec { + name = "microhttpdmy"; + + + src = fetchgit { + url = "https://git.gnunet.org/libmicrohttpd.git"; + rev = "refs/tags/v0.9.77"; + sha256 = "sha256-x+nfB07PbZwBlFc6kZZFYiRpk0a3QN/ByHB+hC8na/o="; + }; + + nativeBuildInputs = with pkgs; [ automake libtool autoconf texinfo ]; + + buildInputs = with pkgs; [ ]; + + configurePhase = '' + ./autogen.sh + ./configure --enable-static --disable-tests --disable-benchmark --disable-shared --disable-https --with-pic + ''; + + installPhase = '' + make install DESTDIR=$out + ''; +} diff --git a/assembly/nix/openssl.nix b/assembly/nix/openssl.nix new file mode 100644 index 00000000..8d30aa50 --- /dev/null +++ b/assembly/nix/openssl.nix @@ -0,0 +1,30 @@ +{ pkgs ? import { system = builtins.currentSystem; } +, stdenv ? pkgs.stdenv +, fetchFromGitHub ? 
pkgs.fetchFromGitHub
+}:
+
+stdenv.mkDerivation rec {
+  name = "opensslmy";
+
+  src = fetchFromGitHub {
+    owner = "openssl";
+    repo = "openssl";
+    rev = "refs/tags/openssl-3.1.4";
+    sha256 = "sha256-Vvf1wiNb4ikg1lIS9U137aodZ2JzM711tSWMJFYWtWI=";
+  };
+
+  nativeBuildInputs = with pkgs; [ perl ];
+
+  buildInputs = with pkgs; [ ];
+
+  postPatch = ''
+    patchShebangs Configure
+  '';
+
+  configurePhase = ''
+    ./Configure no-shared
+  '';
+  installPhase = ''
+    make install DESTDIR=$out
+  '';
+}
diff --git a/assembly/wasm/fift-func-wasm-build-ubuntu.sh b/assembly/wasm/fift-func-wasm-build-ubuntu.sh
new file mode 100644
index 00000000..9ca23cc0
--- /dev/null
+++ b/assembly/wasm/fift-func-wasm-build-ubuntu.sh
@@ -0,0 +1,159 @@
+# The script builds funcfift compiler to WASM
+
+# Execute these prerequisites first
+# sudo apt update
+# sudo apt install -y build-essential git make cmake ninja-build clang libgflags-dev zlib1g-dev libssl-dev \
+# libreadline-dev libmicrohttpd-dev pkg-config libgsl-dev python3 python3-dev python3-pip \
+# nodejs libsecp256k1-dev libsodium-dev automake libtool
+
+# wget https://apt.llvm.org/llvm.sh
+# chmod +x llvm.sh
+# sudo ./llvm.sh 16 all
+
+with_artifacts=false
+
+while getopts 'a' flag; do
+  case "${flag}" in
+    a) with_artifacts=true ;;
+    *) break
+       ;;
+  esac
+done
+
+export CC=$(which clang-16)
+export CXX=$(which clang++-16)
+export CCACHE_DISABLE=1
+
+cd ../..
+rm -rf openssl zlib emsdk secp256k1 libsodium build
+echo `pwd`
+
+git clone https://github.com/openssl/openssl.git
+cd openssl
+git checkout openssl-3.1.4
+./config
+make -j16
+OPENSSL_DIR=`pwd`
+cd ..
+
+git clone https://github.com/madler/zlib.git
+cd zlib
+ZLIB_DIR=`pwd`
+cd ..
+
+git clone https://github.com/bitcoin-core/secp256k1.git
+cd secp256k1
+./autogen.sh
+SECP256K1_DIR=`pwd`
+cd ..
+
+git clone https://github.com/jedisct1/libsodium --branch stable
+cd libsodium
+SODIUM_DIR=`pwd`
+cd ..
+
+mkdir build
+cd build
+cmake -GNinja -DCMAKE_BUILD_TYPE=Release \
+-DCMAKE_CXX_STANDARD=17 \
+-DOPENSSL_FOUND=1 \
+-DOPENSSL_ROOT_DIR=$OPENSSL_DIR \
+-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \
+-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.so \
+-DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.so \
+-DTON_USE_ABSEIL=OFF ..
+
+test $? -eq 0 || { echo "Can't configure TON build"; exit 1; }
+
+ninja fift smc-envelope
+
+test $? -eq 0 || { echo "Can't compile fift "; exit 1; }
+
+rm -rf *
+
+cd ..
+
+git clone https://github.com/emscripten-core/emsdk.git
+cd emsdk
+./emsdk install 3.1.19
+./emsdk activate 3.1.19
+EMSDK_DIR=`pwd`
+ls $EMSDK_DIR
+
+. $EMSDK_DIR/emsdk_env.sh
+export CC=$(which emcc)
+export CXX=$(which em++)
+export CCACHE_DISABLE=1
+
+cd ../openssl
+
+make clean
+emconfigure ./Configure linux-generic32 no-shared no-dso no-engine no-unit-test
+sed -i 's/CROSS_COMPILE=.*/CROSS_COMPILE=/g' Makefile
+sed -i 's/-ldl//g' Makefile
+sed -i 's/-O3/-Os/g' Makefile
+emmake make depend
+emmake make -j16
+test $? -eq 0 || { echo "Can't compile OpenSSL with emmake "; exit 1; }
+
+cd ../zlib
+
+emconfigure ./configure --static
+emmake make -j16
+test $? -eq 0 || { echo "Can't compile zlib with emmake "; exit 1; }
+ZLIB_DIR=`pwd`
+
+cd ../secp256k1
+
+emconfigure ./configure --enable-module-recovery
+emmake make -j16
+test $? -eq 0 || { echo "Can't compile secp256k1 with emmake "; exit 1; }
+
+cd ../libsodium
+
+emconfigure ./configure --disable-ssp
+emmake make -j16
+test $? 
-eq 0 || { echo "Can't compile libsodium with emmake "; exit 1; } + +cd ../build + +emcmake cmake -DUSE_EMSCRIPTEN=ON -DCMAKE_BUILD_TYPE=Release \ +-DZLIB_FOUND=1 \ +-DZLIB_LIBRARIES=$ZLIB_DIR/libz.a \ +-DZLIB_INCLUDE_DIR=$ZLIB_DIR \ +-DOPENSSL_FOUND=1 \ +-DOPENSSL_ROOT_DIR=$OPENSSL_DIR \ +-DOPENSSL_INCLUDE_DIR=$OPENSSL_DIR/include \ +-DOPENSSL_CRYPTO_LIBRARY=$OPENSSL_DIR/libcrypto.a \ +-DOPENSSL_SSL_LIBRARY=$OPENSSL_DIR/libssl.a \ +-DCMAKE_TOOLCHAIN_FILE=$EMSDK_DIR/upstream/emscripten/cmake/Modules/Platform/Emscripten.cmake \ +-DCMAKE_CXX_FLAGS="-sUSE_ZLIB=1" \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=$SECP256K1_DIR/include \ +-DSECP256K1_LIBRARY=$SECP256K1_DIR/.libs/libsecp256k1.a \ +-DSODIUM_INCLUDE_DIR=$SODIUM_DIR/src/libsodium/include \ +-DSODIUM_LIBRARY_RELEASE=$SODIUM_DIR/src/libsodium/.libs/libsodium.a \ +-DSODIUM_USE_STATIC_LIBS=ON .. + +test $? -eq 0 || { echo "Can't configure TON with emmake "; exit 1; } +cp -R ../crypto/smartcont ../crypto/fift/lib crypto + +emmake make -j16 funcfiftlib func fift tlbc emulator-emscripten + +test $? -eq 0 || { echo "Can't compile TON with emmake "; exit 1; } + +if [ "$with_artifacts" = true ]; then + echo "Creating artifacts..." + cd .. + rm -rf artifacts + mkdir artifacts + ls build/crypto + cp build/crypto/fift* artifacts + cp build/crypto/func* artifacts + cp build/crypto/tlbc* artifacts + cp build/emulator/emulator-emscripten* artifacts + cp -R crypto/smartcont artifacts + cp -R crypto/fift/lib artifacts +fi + + diff --git a/blockchain-explorer/CMakeLists.txt b/blockchain-explorer/CMakeLists.txt index b1979e70..37a00c76 100644 --- a/blockchain-explorer/CMakeLists.txt +++ b/blockchain-explorer/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(NIX "Use \"ON\" for a static build." 
OFF) @@ -14,15 +14,29 @@ set(BLOCHAIN_EXPLORER_SOURCE add_executable(blockchain-explorer ${BLOCHAIN_EXPLORER_SOURCE}) if (NIX) - find_package(PkgConfig REQUIRED) - pkg_check_modules(MHD libmicrohttpd) - target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIRS} ${MHD_STATIC_INCLUDE_DIRS}) - target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block lite-client-common ${MHD_LIBRARIES} ${MHD_STATIC_LIBRARIES}) + if (MHD_FOUND) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + else() + find_package(PkgConfig REQUIRED) + pkg_check_modules(MHD libmicrohttpd) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR} ${MHD_STATIC_INCLUDE_DIRS}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARIES} ${MHD_STATIC_LIBRARIES}) + endif() else() - find_package(MHD) - target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIRS}) - target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block lite-client-common ${MHD_LIBRARIES}) + if (MHD_FOUND) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + else() + find_package(MHD) + target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) + target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) + endif() endif() +target_include_directories(blockchain-explorer PUBLIC ${MHD_INCLUDE_DIR}) +target_link_libraries(blockchain-explorer tdutils tdactor adnllite tl_lite_api tl-lite-utils ton_crypto ton_block ${MHD_LIBRARY}) +target_link_libraries(blockchain-explorer lite-client-common) + install(TARGETS blockchain-explorer RUNTIME DESTINATION bin) diff --git a/blockchain-explorer/blockchain-explorer.cpp b/blockchain-explorer/blockchain-explorer.cpp index 9baee7d8..c22ce1b3 100644 --- a/blockchain-explorer/blockchain-explorer.cpp +++ b/blockchain-explorer/blockchain-explorer.cpp @@ -52,7 +52,7 @@ #include "vm/boc.h" #include "vm/cellops.h" #include "vm/cells/MerkleProof.h" -#include "vm/cp0.h" +#include "vm/vm.h" #include "auto/tl/lite_api.h" #include "ton/lite-tl.hpp" @@ -104,23 +104,24 @@ class HttpQueryRunner { Self->finish(nullptr); } }); - mutex_.lock(); scheduler_ptr->run_in_context_external([&]() { func(std::move(P)); }); } void finish(MHD_Response* response) { + std::unique_lock lock(mutex_); response_ = response; - mutex_.unlock(); + cond.notify_all(); } MHD_Response* wait() { - mutex_.lock(); - mutex_.unlock(); + std::unique_lock lock(mutex_); + cond.wait(lock, [&]() { return response_ != nullptr; }); return response_; } private: std::function)> func_; - MHD_Response* response_; + MHD_Response* response_ = nullptr; std::mutex mutex_; + std::condition_variable cond; }; class CoreActor : public CoreActorInterface { @@ -643,7 +644,7 @@ int main(int argc, char* argv[]) { }); #endif - vm::init_op_cp0(); + vm::init_vm().ensure(); td::actor::Scheduler scheduler({2}); scheduler_ptr = &scheduler; diff --git a/catchain/CMakeLists.txt b/catchain/CMakeLists.txt index a57d3788..8ab9525d 100644 --- a/catchain/CMakeLists.txt +++ 
b/catchain/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/catchain/catchain-receiver.cpp b/catchain/catchain-receiver.cpp index 482cfb43..c8206de9 100644 --- a/catchain/catchain-receiver.cpp +++ b/catchain/catchain-receiver.cpp @@ -287,7 +287,9 @@ void CatChainReceiverImpl::add_block_cont_3(tl_object_ptrdelivered()); + LOG_CHECK(last_sent_block_->delivered()) + << "source=" << last_sent_block_->get_source_id() << " ill=" << last_sent_block_->is_ill() + << " height=" << last_sent_block_->get_height(); } active_send_ = false; diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index 96d92371..88a3671b 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(COMMON_SOURCE checksum.h diff --git a/common/global-version.h b/common/global-version.h index 01c1795d..0a90ab85 100644 --- a/common/global-version.h +++ b/common/global-version.h @@ -19,6 +19,6 @@ namespace ton { // See doc/GlobalVersions.md -const int SUPPORTED_VERSION = 4; +const int SUPPORTED_VERSION = 6; } diff --git a/create-hardfork/CMakeLists.txt b/create-hardfork/CMakeLists.txt index 3d78c118..41b94b52 100644 --- a/create-hardfork/CMakeLists.txt +++ b/create-hardfork/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/create-hardfork/create-hardfork.cpp b/create-hardfork/create-hardfork.cpp index 165748f9..66b9a854 100644 --- a/create-hardfork/create-hardfork.cpp +++ b/create-hardfork/create-hardfork.cpp @@ -49,7 +49,7 @@ #include "validator/fabric.h" #include "validator/impl/collator.h" -#include "crypto/vm/cp0.h" +#include "crypto/vm/vm.h" #include "crypto/block/block-db.h" #include "common/errorlog.h" @@ -310,7 +310,7 @@ int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_INFO); td::set_default_failure_signal_handler().ensure(); - CHECK(vm::init_op_cp0()); + vm::init_vm().ensure(); td::actor::ActorOwn x; diff --git a/crypto/CMakeLists.txt b/crypto/CMakeLists.txt index 13e4c5bd..0871d250 100644 --- a/crypto/CMakeLists.txt +++ b/crypto/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) @@ -236,6 +236,7 @@ set(SMC_ENVELOPE_SOURCE smc-envelope/SmartContractCode.cpp smc-envelope/WalletInterface.cpp smc-envelope/WalletV3.cpp + smc-envelope/WalletV4.cpp smc-envelope/GenericAccount.h smc-envelope/HighloadWallet.h @@ -246,6 +247,7 @@ set(SMC_ENVELOPE_SOURCE smc-envelope/SmartContractCode.h smc-envelope/WalletInterface.h smc-envelope/WalletV3.h + smc-envelope/WalletV4.h ) set(ED25519_TEST_SOURCE @@ -323,23 +325,20 @@ endif() if (MSVC) find_package(Sodium REQUIRED) target_compile_definitions(ton_crypto PUBLIC SODIUM_STATIC) - target_include_directories(ton_crypto_core PUBLIC $) - target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARIES}) -elseif (ANDROID) + target_include_directories(ton_crypto_core PUBLIC $) + target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY}) +elseif (ANDROID OR EMSCRIPTEN) target_include_directories(ton_crypto_core PUBLIC $) target_link_libraries(ton_crypto PUBLIC $) else() - if (NOT USE_EMSCRIPTEN) + if (NOT SODIUM_FOUND) 
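// Illustrative sketch, not part of the patch: the hunks above swap the old opcode-table
// initializer (CHECK(vm::init_op_cp0())) for vm::init_vm(), whose returned status can be
// checked explicitly. A minimal start-up sequence under that assumption:
#include "crypto/vm/vm.h"  // replaces the former crypto/vm/cp0.h include

int run_tool() {
  vm::init_vm().ensure();  // initialize the TVM once, aborting with a readable error on failure
  // ... set up actors / scheduler and run as before ...
  return 0;
}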
find_package(Sodium REQUIRED) - target_include_directories(ton_crypto_core PUBLIC $) - target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARIES}) else() - target_include_directories(ton_crypto_core PUBLIC $) - target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY}) - endif() - if (NOT APPLE AND NOT USE_EMSCRIPTEN) - target_link_libraries(ton_crypto_core PUBLIC secp256k1) + message(STATUS "Using Sodium ${SODIUM_LIBRARY_RELEASE}") endif() + target_compile_definitions(ton_crypto PUBLIC SODIUM_STATIC) + target_include_directories(ton_crypto_core PUBLIC $) + target_link_libraries(ton_crypto PUBLIC ${SECP256K1_LIBRARY}) endif() target_include_directories(ton_crypto_core PUBLIC $) @@ -354,7 +353,7 @@ add_executable(test-ed25519-crypto test/test-ed25519-crypto.cpp) target_include_directories(test-ed25519-crypto PUBLIC $) target_link_libraries(test-ed25519-crypto PUBLIC ton_crypto) -add_library(fift-lib ${FIFT_SOURCE}) +add_library(fift-lib STATIC ${FIFT_SOURCE}) target_include_directories(fift-lib PUBLIC $) target_link_libraries(fift-lib PUBLIC ton_crypto ton_db tdutils ton_block) if (USE_EMSCRIPTEN) @@ -398,6 +397,7 @@ if (USE_EMSCRIPTEN) target_link_options(funcfiftlib PRIVATE -sIGNORE_MISSING_MAIN=1) target_link_options(funcfiftlib PRIVATE -sAUTO_NATIVE_LIBRARIES=0) target_link_options(funcfiftlib PRIVATE -sMODULARIZE=1) + target_link_options(funcfiftlib PRIVATE -sTOTAL_MEMORY=33554432) target_link_options(funcfiftlib PRIVATE -sALLOW_MEMORY_GROWTH=1) target_link_options(funcfiftlib PRIVATE -sALLOW_TABLE_GROWTH=1) target_link_options(funcfiftlib PRIVATE --embed-file ${CMAKE_CURRENT_SOURCE_DIR}/fift/lib@/fiftlib) @@ -504,7 +504,7 @@ if (NOT CMAKE_CROSSCOMPILING OR USE_EMSCRIPTEN) GenFif(DEST smartcont/auto/simple-wallet-ext-code SOURCE smartcont/simple-wallet-ext-code.fc NAME simple-wallet-ext) endif() -add_library(smc-envelope ${SMC_ENVELOPE_SOURCE}) +add_library(smc-envelope STATIC ${SMC_ENVELOPE_SOURCE}) target_include_directories(smc-envelope PUBLIC $) target_link_libraries(smc-envelope PUBLIC ton_crypto PRIVATE tdutils ton_block) if (NOT CMAKE_CROSSCOMPILING) diff --git a/crypto/block/block.cpp b/crypto/block/block.cpp index 14f2c18b..cb934417 100644 --- a/crypto/block/block.cpp +++ b/crypto/block/block.cpp @@ -734,7 +734,7 @@ td::uint64 BlockLimitStatus::estimate_block_size(const vm::NewCellStorageStat::S sum += *extra; } return 2000 + (sum.bits >> 3) + sum.cells * 12 + sum.internal_refs * 3 + sum.external_refs * 40 + accounts * 200 + - transactions * 200 + (extra ? 200 : 0) + extra_out_msgs * 300; + transactions * 200 + (extra ? 
200 : 0) + extra_out_msgs * 300 + public_library_diff * 700; } int BlockLimitStatus::classify() const { @@ -1030,8 +1030,8 @@ td::Status ShardState::merge_with(ShardState& sib) { return td::Status::OK(); } -td::Result> ShardState::compute_split_out_msg_queue( - ton::ShardIdFull subshard) { +td::Result> ShardState::compute_split_out_msg_queue(ton::ShardIdFull subshard, + td::uint32* queue_size) { auto shard = id_.shard_full(); if (!ton::shard_is_parent(shard, subshard)) { return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() + @@ -1039,7 +1039,7 @@ td::Result> ShardState::compute_split_o } CHECK(out_msg_queue_); auto subqueue = std::make_unique(*out_msg_queue_); - int res = block::filter_out_msg_queue(*subqueue, shard, subshard); + int res = block::filter_out_msg_queue(*subqueue, shard, subshard, queue_size); if (res < 0) { return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str()); } @@ -1061,7 +1061,7 @@ td::Result> ShardState::compu return std::move(sub_processed_upto); } -td::Status ShardState::split(ton::ShardIdFull subshard) { +td::Status ShardState::split(ton::ShardIdFull subshard, td::uint32* queue_size) { if (!ton::shard_is_parent(id_.shard_full(), subshard)) { return td::Status::Error(-666, "cannot split subshard "s + subshard.to_str() + " from state of " + id_.to_str() + " because it is not a parent"); @@ -1079,7 +1079,7 @@ td::Status ShardState::split(ton::ShardIdFull subshard) { auto shard1 = id_.shard_full(); CHECK(ton::shard_is_parent(shard1, subshard)); CHECK(out_msg_queue_); - int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard); + int res1 = block::filter_out_msg_queue(*out_msg_queue_, shard1, subshard, queue_size); if (res1 < 0) { return td::Status::Error(-666, "error splitting OutMsgQueue of "s + id_.to_str()); } @@ -1119,8 +1119,12 @@ td::Status ShardState::split(ton::ShardIdFull subshard) { return td::Status::OK(); } -int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard) { - return out_queue.filter([subshard, old_shard](vm::CellSlice& cs, td::ConstBitPtr key, int key_len) -> int { +int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard, + td::uint32* queue_size) { + if (queue_size) { + *queue_size = 0; + } + return out_queue.filter([=](vm::CellSlice& cs, td::ConstBitPtr key, int key_len) -> int { CHECK(key_len == 352); LOG(DEBUG) << "scanning OutMsgQueue entry with key " << key.to_hex(key_len); block::tlb::MsgEnvelope::Record_std env; @@ -1143,7 +1147,11 @@ int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull ol << " does not contain current address belonging to shard " << old_shard.to_str(); return -1; } - return ton::shard_contains(subshard, cur_prefix); + bool res = ton::shard_contains(subshard, cur_prefix); + if (res && queue_size) { + ++*queue_size; + } + return res; }); } diff --git a/crypto/block/block.h b/crypto/block/block.h index f2c89286..61b057c4 100644 --- a/crypto/block/block.h +++ b/crypto/block/block.h @@ -275,6 +275,7 @@ struct BlockLimitStatus { vm::NewCellStorageStat st_stat; unsigned accounts{}, transactions{}, extra_out_msgs{}; vm::ProofStorageStat collated_data_stat; + unsigned public_library_diff{}; BlockLimitStatus(const BlockLimits& limits_, ton::LogicalTime lt = 0) : limits(limits_), cur_lt(std::max(limits_.start_lt, lt)) { } @@ -284,6 +285,7 @@ struct BlockLimitStatus { transactions = accounts = 0; 
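// Hypothetical call site, sketched only to show the new optional out-parameter added to
// ShardState::split() and filter_out_msg_queue() above: passing a td::uint32* yields the
// number of entries left in the outbound queue, while nullptr keeps the old behaviour.
#include "crypto/block/block.h"

td::Status split_and_report(block::ShardState& state, ton::ShardIdFull subshard) {
  td::uint32 queue_size = 0;
  TRY_STATUS(state.split(subshard, &queue_size));
  LOG(INFO) << "out-msg queue of " << subshard.to_str() << " holds " << queue_size
            << " entries after split";
  return td::Status::OK();
}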
gas_used = 0; extra_out_msgs = 0; + public_library_diff = 0; collated_data_stat = {}; } td::uint64 estimate_block_size(const vm::NewCellStorageStat::Stat* extra = nullptr) const; @@ -445,10 +447,11 @@ struct ShardState { ton::BlockSeqno prev_mc_block_seqno, bool after_split, bool clear_history, std::function for_each_mcseqno); td::Status merge_with(ShardState& sib); - td::Result> compute_split_out_msg_queue(ton::ShardIdFull subshard); + td::Result> compute_split_out_msg_queue(ton::ShardIdFull subshard, + td::uint32* queue_size = nullptr); td::Result> compute_split_processed_upto( ton::ShardIdFull subshard); - td::Status split(ton::ShardIdFull subshard); + td::Status split(ton::ShardIdFull subshard, td::uint32* queue_size = nullptr); td::Status unpack_out_msg_queue_info(Ref out_msg_queue_info); bool clear_load_history() { overload_history_ = underload_history_ = 0; @@ -668,7 +671,8 @@ class MtCarloComputeShare { void gen_vset(); }; -int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard); +int filter_out_msg_queue(vm::AugmentedDictionary& out_queue, ton::ShardIdFull old_shard, ton::ShardIdFull subshard, + td::uint32* queue_size = nullptr); std::ostream& operator<<(std::ostream& os, const ShardId& shard_id); diff --git a/crypto/block/block.tlb b/crypto/block/block.tlb index 9e542999..9988f2d1 100644 --- a/crypto/block/block.tlb +++ b/crypto/block/block.tlb @@ -789,7 +789,8 @@ _ CollatorConfig = ConfigParam 41; size_limits_config#01 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 max_ext_msg_size:uint32 max_ext_msg_depth:uint16 = SizeLimitsConfig; size_limits_config_v2#02 max_msg_bits:uint32 max_msg_cells:uint32 max_library_cells:uint32 max_vm_data_depth:uint16 - max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 = SizeLimitsConfig; + max_ext_msg_size:uint32 max_ext_msg_depth:uint16 max_acc_state_cells:uint32 max_acc_state_bits:uint32 + max_acc_public_libraries:uint32 = SizeLimitsConfig; _ SizeLimitsConfig = ConfigParam 43; // key is [ wc:int32 addr:uint256 ] diff --git a/crypto/block/mc-config.cpp b/crypto/block/mc-config.cpp index 1ab907cf..54706936 100644 --- a/crypto/block/mc-config.cpp +++ b/crypto/block/mc-config.cpp @@ -621,12 +621,14 @@ td::Result> Config::get_storage_prices() const { } vm::Dictionary dict{std::move(cell), 32}; if (!dict.check_for_each([&res](Ref cs_ref, td::ConstBitPtr key, int n) -> bool { - block::gen::StoragePrices::Record data; - if (!tlb::csr_unpack(std::move(cs_ref), data) || data.utime_since != key.get_uint(n)) { + auto r_prices = do_get_one_storage_prices(*cs_ref); + if (r_prices.is_error()) { + return false; + } + res.push_back(r_prices.move_as_ok()); + if (res.back().valid_since != key.get_uint(n)) { return false; } - res.emplace_back(data.utime_since, data.bit_price_ps, data.cell_price_ps, data.mc_bit_price_ps, - data.mc_cell_price_ps); return true; })) { return td::Status::Error("invalid storage prices dictionary in configuration parameter 18"); @@ -634,16 +636,25 @@ td::Result> Config::get_storage_prices() const { return std::move(res); } -td::Result Config::do_get_gas_limits_prices(td::Ref cell, int id) { +td::Result Config::do_get_one_storage_prices(vm::CellSlice cs) { + block::gen::StoragePrices::Record data; + if (!tlb::unpack(cs, data)) { + return td::Status::Error("invalid storage prices dictionary in configuration parameter 18"); + } + return StoragePrices{data.utime_since, data.bit_price_ps, 
data.cell_price_ps, data.mc_bit_price_ps, + data.mc_cell_price_ps}; +} + +td::Result Config::do_get_gas_limits_prices(vm::CellSlice cs, int id) { GasLimitsPrices res; - auto cs = vm::load_cell_slice(cell); + vm::CellSlice cs0 = cs; block::gen::GasLimitsPrices::Record_gas_flat_pfx flat; if (tlb::unpack(cs, flat)) { cs = *flat.other; res.flat_gas_limit = flat.flat_gas_limit; res.flat_gas_price = flat.flat_gas_price; } else { - cs = vm::load_cell_slice(cell); + cs = cs0; } auto f = [&](const auto& r, td::uint64 spec_limit) { res.gas_limit = r.gas_limit; @@ -658,7 +669,7 @@ td::Result Config::do_get_gas_limits_prices(td::Ref c f(rec, rec.special_gas_limit); } else { block::gen::GasLimitsPrices::Record_gas_prices rec0; - if (tlb::unpack(cs, rec0)) { + if (tlb::unpack(cs = cs0, rec0)) { f(rec0, rec0.gas_limit); } else { return td::Status::Error(PSLICE() << "configuration parameter " << id @@ -688,7 +699,7 @@ td::Result Config::get_gas_limits_prices(bool is_masterchain) c if (cell.is_null()) { return td::Status::Error(PSLICE() << "configuration parameter " << id << " with gas prices is absent"); } - return do_get_gas_limits_prices(std::move(cell), id); + return do_get_gas_limits_prices(vm::load_cell_slice(cell), id); } td::Result Config::get_msg_prices(bool is_masterchain) const { @@ -697,7 +708,10 @@ td::Result Config::get_msg_prices(bool is_masterchain) const { if (cell.is_null()) { return td::Status::Error(PSLICE() << "configuration parameter " << id << " with msg prices is absent"); } - auto cs = vm::load_cell_slice(std::move(cell)); + return do_get_msg_prices(vm::load_cell_slice(cell), id); +} + +td::Result Config::do_get_msg_prices(vm::CellSlice cs, int id) { block::gen::MsgForwardPrices::Record rec; if (!tlb::unpack(cs, rec)) { return td::Status::Error(PSLICE() << "configuration parameter " << id @@ -1916,10 +1930,17 @@ std::vector Config::compute_total_validator_set(int next) c } td::Result Config::get_size_limits_config() const { - SizeLimitsConfig limits; td::Ref param = get_config_param(43); if (param.is_null()) { - return limits; + return do_get_size_limits_config({}); + } + return do_get_size_limits_config(vm::load_cell_slice_ref(param)); +} + +td::Result Config::do_get_size_limits_config(td::Ref cs) { + SizeLimitsConfig limits; + if (cs.is_null()) { + return limits; // default values } auto unpack_v1 = [&](auto& rec) { limits.max_msg_bits = rec.max_msg_bits; @@ -1934,12 +1955,13 @@ td::Result Config::get_size_limits_config() const { unpack_v1(rec); limits.max_acc_state_bits = rec.max_acc_state_bits; limits.max_acc_state_cells = rec.max_acc_state_cells; + limits.max_acc_public_libraries = rec.max_acc_public_libraries; }; gen::SizeLimitsConfig::Record_size_limits_config rec_v1; gen::SizeLimitsConfig::Record_size_limits_config_v2 rec_v2; - if (tlb::unpack_cell(param, rec_v1)) { + if (tlb::csr_unpack(cs, rec_v1)) { unpack_v1(rec_v1); - } else if (tlb::unpack_cell(param, rec_v2)) { + } else if (tlb::csr_unpack(cs, rec_v2)) { unpack_v2(rec_v2); } else { return td::Status::Error("configuration parameter 43 is invalid"); @@ -1974,6 +1996,42 @@ BurningConfig Config::get_burning_config() const { return c; } +td::Ref Config::get_unpacked_config_tuple(ton::UnixTime now) const { + auto get_param = [&](td::int32 idx) -> vm::StackEntry { + auto cell = get_config_param(idx); + if (cell.is_null()) { + return {}; + } + return vm::load_cell_slice_ref(cell); + }; + auto get_current_storage_prices = [&]() -> vm::StackEntry { + auto cell = get_config_param(18); + if (cell.is_null()) { + return {}; + } + 
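// The get_current_storage_prices() lambda above walks ConfigParam 18 in ascending key
// order (keys are utime_since values) and keeps the last entry already active at `now`.
// A self-contained toy version of that selection rule, assuming ordered keys:
#include <cstdint>
#include <map>
#include <optional>

std::optional<int> latest_active_entry(const std::map<uint32_t, int>& by_utime_since,
                                       uint32_t now) {
  std::optional<int> res;
  for (const auto& [utime_since, value] : by_utime_since) {
    if (utime_since > now) {
      break;        // later entries are not active yet
    }
    res = value;    // keep the newest entry with utime_since <= now
  }
  return res;
}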
vm::StackEntry res; + vm::Dictionary dict{std::move(cell), 32}; + dict.check_for_each([&](Ref cs_ref, td::ConstBitPtr key, int n) -> bool { + auto utime_since = key.get_uint(n); + if (now >= utime_since) { + res = std::move(cs_ref); + return true; + } + return false; + }); + return res; + }; + std::vector tuple; + tuple.push_back(get_current_storage_prices()); // storage_prices + tuple.push_back(get_param(19)); // global_id + tuple.push_back(get_param(20)); // config_mc_gas_prices + tuple.push_back(get_param(21)); // config_gas_prices + tuple.push_back(get_param(24)); // config_mc_fwd_prices + tuple.push_back(get_param(25)); // config_fwd_prices + tuple.push_back(get_param(43)); // size_limits_config + return td::make_cnt_ref>(std::move(tuple)); +} + td::Result> Config::unpack_validator_set_start_stop(Ref vset_root) { if (vset_root.is_null()) { return td::Status::Error("validator set absent"); diff --git a/crypto/block/mc-config.h b/crypto/block/mc-config.h index bf5350b5..1262a1d8 100644 --- a/crypto/block/mc-config.h +++ b/crypto/block/mc-config.h @@ -350,7 +350,11 @@ struct GasLimitsPrices { td::uint64 freeze_due_limit{0}; td::uint64 delete_due_limit{0}; - td::RefInt256 compute_gas_price(td::uint64 gas_used) const; + td::RefInt256 compute_gas_price(td::uint64 gas_used) const { + return gas_used <= flat_gas_limit + ? td::make_refint(flat_gas_price) + : td::rshift(td::make_refint(gas_price) * (gas_used - flat_gas_limit), 16, 1) + flat_gas_price; + } }; // msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms @@ -365,6 +369,7 @@ struct MsgPrices { td::uint32 first_frac; td::uint32 next_frac; td::uint64 compute_fwd_fees(td::uint64 cells, td::uint64 bits) const; + td::RefInt256 compute_fwd_fees256(td::uint64 cells, td::uint64 bits) const; std::pair compute_fwd_ihr_fees(td::uint64 cells, td::uint64 bits, bool ihr_disabled = false) const; MsgPrices() = default; @@ -389,6 +394,7 @@ struct SizeLimitsConfig { ExtMsgLimits ext_msg_limits; td::uint32 max_acc_state_cells = 1 << 16; td::uint32 max_acc_state_bits = (1 << 16) * 1023; + td::uint32 max_acc_public_libraries = 256; }; struct CatchainValidatorsConfig { @@ -614,9 +620,11 @@ class Config { bool is_special_smartcontract(const ton::StdSmcAddress& addr) const; static td::Result> unpack_validator_set(Ref valset_root); td::Result> get_storage_prices() const; + static td::Result do_get_one_storage_prices(vm::CellSlice cs); td::Result get_gas_limits_prices(bool is_masterchain = false) const; - static td::Result do_get_gas_limits_prices(td::Ref cell, int id); + static td::Result do_get_gas_limits_prices(vm::CellSlice cs, int id); td::Result get_msg_prices(bool is_masterchain = false) const; + static td::Result do_get_msg_prices(vm::CellSlice cs, int id); static CatchainValidatorsConfig unpack_catchain_validators_config(Ref cell); CatchainValidatorsConfig get_catchain_validators_config() const; td::Status visit_validator_params() const; @@ -644,8 +652,10 @@ class Config { std::vector compute_total_validator_set(int next) const; CollatorConfig get_collator_config(bool need_collator_nodes) const; td::Result get_size_limits_config() const; + static td::Result do_get_size_limits_config(td::Ref cs); std::unique_ptr get_suspended_addresses(ton::UnixTime now) const; BurningConfig get_burning_config() const; + td::Ref get_unpacked_config_tuple(ton::UnixTime now) const; static std::vector do_compute_validator_set(const block::CatchainValidatorsConfig& ccv_conf, ton::ShardIdFull shard, const block::ValidatorSet& 
vset, ton::UnixTime time, diff --git a/crypto/block/transaction.cpp b/crypto/block/transaction.cpp index 0c77317f..b3aa6c8f 100644 --- a/crypto/block/transaction.cpp +++ b/crypto/block/transaction.cpp @@ -28,9 +28,20 @@ #include "td/utils/Timer.h" namespace { +/** + * Logger that stores the tail of log messages. + * + * @param max_size The size of the buffer. Default is 256. + */ class StringLoggerTail : public td::LogInterface { public: explicit StringLoggerTail(size_t max_size = 256) : buf(max_size, '\0') {} + + /** + * Appends a slice of data to the buffer. + * + * @param slice The slice of data to be appended. + */ void append(td::CSlice slice) override { if (slice.size() > buf.size()) { slice.remove_prefix(slice.size() - buf.size()); @@ -46,6 +57,12 @@ class StringLoggerTail : public td::LogInterface { slice.remove_prefix(s); } } + + /** + * Retrieves the tail of the log. + * + * @returns The log as std::string. + */ std::string get_log() const { if (truncated) { std::string res = buf; @@ -55,6 +72,7 @@ class StringLoggerTail : public td::LogInterface { return buf.substr(0, pos); } } + private: std::string buf; size_t pos = 0; @@ -65,6 +83,13 @@ class StringLoggerTail : public td::LogInterface { namespace block { using td::Ref; +/** + * Looks up a library among public libraries. + * + * @param key A constant bit pointer representing the key of the library to lookup. + * + * @returns A reference to the library cell if found, null otherwise. + */ Ref ComputePhaseConfig::lookup_library(td::ConstBitPtr key) const { return libraries ? vm::lookup_library_in(key, libraries->get_root_cell()) : Ref{}; } @@ -75,12 +100,27 @@ Ref ComputePhaseConfig::lookup_library(td::ConstBitPtr key) const { * */ +/** + * Sets the address of the account. + * + * @param wc The workchain ID of the account. + * @param new_addr The new address of the account. + * + * @returns True if the address was successfully set, false otherwise. + */ bool Account::set_address(ton::WorkchainId wc, td::ConstBitPtr new_addr) { workchain = wc; addr = new_addr; return true; } +/** + * Sets the split depth of the account. + * + * @param new_split_depth The new split depth value to be set. + * + * @returns True if the split depth was successfully set, False otherwise. + */ bool Account::set_split_depth(int new_split_depth) { if (new_split_depth < 0 || new_split_depth > 30) { return false; // invalid value for split_depth @@ -94,11 +134,26 @@ bool Account::set_split_depth(int new_split_depth) { } } +/** + * Checks if the given split depth is valid for the Account. + * + * @param split_depth The split depth to be checked. + * + * @returns True if the split depth is valid, False otherwise. + */ bool Account::check_split_depth(int split_depth) const { return split_depth_set_ ? (split_depth == split_depth_) : (split_depth >= 0 && split_depth <= 30); } -// initializes split_depth and addr_rewrite +/** + * Parses anycast data of the account address. + * + * Initializes split_depth and addr_rewrite. + * + * @param cs The cell slice containing partially-parsed account addressa. + * + * @returns True if parsing was successful, false otherwise. + */ bool Account::parse_maybe_anycast(vm::CellSlice& cs) { int t = (int)cs.fetch_ulong(1); if (t < 0) { @@ -113,6 +168,13 @@ bool Account::parse_maybe_anycast(vm::CellSlice& cs) { && set_split_depth(depth); } +/** + * Stores the anycast information to a serialized account address. + * + * @param cb The vm::CellBuilder object to store the information in. 
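// Illustration only (StringLoggerTail sits in an anonymous namespace in this file, so a
// caller would have to live in the same translation unit): the logger keeps just the last
// max_size bytes appended, which is what capturing the tail of a long TVM trace needs.
std::string capture_tail_example() {
  StringLoggerTail logger(/*max_size=*/256);
  logger.append("very long VM trace, mostly uninteresting ... ");
  logger.append("the last lines are what the user actually sees");
  return logger.get_log();  // at most 256 bytes; older bytes are dropped
}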
+ * + * @returns True if the anycast information was successfully stored, false otherwise. + */ bool Account::store_maybe_anycast(vm::CellBuilder& cb) const { if (!split_depth_set_ || !split_depth_) { return cb.store_bool_bool(false); @@ -122,6 +184,13 @@ bool Account::store_maybe_anycast(vm::CellBuilder& cb) const { && cb.store_bits_bool(addr_rewrite.cbits(), split_depth_); // rewrite_pfx:(bits depth) } +/** + * Unpacks the address from a given CellSlice. + * + * @param addr_cs The CellSlice containing the address. + * + * @returns True if the address was successfully unpacked, False otherwise. + */ bool Account::unpack_address(vm::CellSlice& addr_cs) { int addr_tag = block::gen::t_MsgAddressInt.get_tag(addr_cs); int new_wc = ton::workchainInvalid; @@ -172,6 +241,15 @@ bool Account::unpack_address(vm::CellSlice& addr_cs) { return true; } +/** + * Unpacks storage information from a CellSlice. + * + * Storage information is serialized using StorageInfo TLB-scheme. + * + * @param cs The CellSlice containing the storage information. + * + * @returns True if the unpacking is successful, false otherwise. + */ bool Account::unpack_storage_info(vm::CellSlice& cs) { block::gen::StorageInfo::Record info; block::gen::StorageUsed::Record used; @@ -198,7 +276,16 @@ bool Account::unpack_storage_info(vm::CellSlice& cs) { return (u != std::numeric_limits::max()); } -// initializes split_depth (from account state - StateInit) +/** + * Unpacks the state of an Account from a CellSlice. + * + * State is serialized using StateInit TLB-scheme. + * Initializes split_depth (from account state - StateInit) + * + * @param cs The CellSlice containing the serialized state. + * + * @returns True if the state was successfully unpacked, False otherwise. + */ bool Account::unpack_state(vm::CellSlice& cs) { block::gen::StateInit::Record state; if (!tlb::unpack_exact(cs, state)) { @@ -226,6 +313,13 @@ bool Account::unpack_state(vm::CellSlice& cs) { return true; } +/** + * Computes the address of the account. + * + * @param force If set to true, the address will be recomputed even if it already exists. + * + * @returns True if the address was successfully computed, false otherwise. + */ bool Account::compute_my_addr(bool force) { if (!force && my_addr.not_null() && my_addr_exact.not_null()) { return true; @@ -266,6 +360,15 @@ bool Account::compute_my_addr(bool force) { return true; } +/** + * Computes the address of the Account. + * + * @param tmp_addr A reference to the CellSlice for the result. + * @param split_depth The split depth for the address. + * @param orig_addr_rewrite Address prefox of length split_depth. + * + * @returns True if the address was successfully computed, false otherwise. + */ bool Account::recompute_tmp_addr(Ref& tmp_addr, int split_depth, td::ConstBitPtr orig_addr_rewrite) const { if (!split_depth && my_addr_exact.not_null()) { @@ -307,6 +410,14 @@ bool Account::recompute_tmp_addr(Ref& tmp_addr, int split_depth, (tmp_addr = vm::load_cell_slice_ref(std::move(cell))).not_null(); } +/** + * Sets address rewriting info for a newly-activated account. + * + * @param split_depth The split depth for the account address. + * @param orig_addr_rewrite Address frepix of length split_depth. + * + * @returns True if the rewriting info was successfully set, false otherwise. 
+ */ bool Account::init_rewrite_addr(int split_depth, td::ConstBitPtr orig_addr_rewrite) { if (split_depth_set_ || !set_split_depth(split_depth)) { return false; @@ -317,8 +428,18 @@ bool Account::init_rewrite_addr(int split_depth, td::ConstBitPtr orig_addr_rewri return compute_my_addr(true); } -// used to unpack previously existing accounts -bool Account::unpack(Ref shard_account, Ref extra, ton::UnixTime now, bool special) { +/** + * Unpacks the account information from the provided CellSlice. + * + * Used to unpack previously existing accounts. + * + * @param shard_account The ShardAccount to unpack. + * @param now The current Unix time. + * @param special Flag indicating if the account is special. + * + * @returns True if the unpacking is successful, false otherwise. + */ +bool Account::unpack(Ref shard_account, ton::UnixTime now, bool special) { LOG(DEBUG) << "unpacking " << (special ? "special " : "") << "account " << addr.to_hex(); if (shard_account.is_null()) { LOG(ERROR) << "account " << addr.to_hex() << " does not have a valid ShardAccount to unpack"; @@ -386,7 +507,13 @@ bool Account::unpack(Ref shard_account, Ref extra, return true; } -// used to initialize new accounts +/** + * Initializes a new Account object. + * + * @param now The current Unix time. + * + * @returns True if the initialization is successful, false otherwise. + */ bool Account::init_new(ton::UnixTime now) { // only workchain and addr are initialized at this point if (workchain == ton::workchainInvalid) { @@ -429,6 +556,11 @@ bool Account::init_new(ton::UnixTime now) { return true; } +/** + * Resets the split depth of the account. + * + * @returns True if the split depth was successfully reset, false otherwise. + */ bool Account::forget_split_depth() { split_depth_set_ = false; split_depth_ = 0; @@ -438,6 +570,11 @@ bool Account::forget_split_depth() { return true; } +/** + * Deactivates the account. + * + * @returns True if the account was successfully deactivated, false otherwise. + */ bool Account::deactivate() { if (status == acc_active) { return false; @@ -461,10 +598,26 @@ bool Account::deactivate() { return true; } +/** + * Checks if the account belongs to a specific shard. + * + * @param shard The shard to check against. + * + * @returns True if the account belongs to the shard, False otherwise. + */ bool Account::belongs_to_shard(ton::ShardIdFull shard) const { return workchain == shard.workchain && ton::shard_is_ancestor(shard.shard, addr); } +/** + * Adds the partial storage payment to the total sum. + * + * @param payment The total sum to be updated. + * @param delta The time delta for which the payment is calculated. + * @param prices The storage prices. + * @param storage Account storage statistics. + * @param is_mc A flag indicating whether the account is in the masterchain. + */ void add_partial_storage_payment(td::BigInt256& payment, ton::UnixTime delta, const block::StoragePrices& prices, const vm::CellStorageStat& storage, bool is_mc) { td::BigInt256 c{(long long)storage.cells}, b{(long long)storage.bits}; @@ -483,11 +636,23 @@ void add_partial_storage_payment(td::BigInt256& payment, ton::UnixTime delta, co payment += b; } +/** + * Computes the storage fees based on the given parameters. + * + * @param now The current Unix time. + * @param pricing The vector of storage prices. + * @param storage_stat Account storage statistics. + * @param last_paid The Unix time when the last payment was made. + * @param is_special A flag indicating if the account is special. 
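// Sketch of the storage-fee arithmetic documented above, reduced to a single pricing
// period and plain integers (the real code accumulates td::BigInt256 across all periods
// and uses the mc_* prices for masterchain accounts):
//   payment = (cells * cell_price_ps + bits * bit_price_ps) * delta
//   fee     = ceil(payment / 2^16)   // nanograms
#include <cstdint>

uint64_t storage_fee_single_period(uint64_t cells, uint64_t bits, uint64_t cell_price_ps,
                                   uint64_t bit_price_ps, uint64_t delta_seconds) {
  unsigned __int128 payment =
      ((unsigned __int128)cells * cell_price_ps + (unsigned __int128)bits * bit_price_ps) *
      delta_seconds;
  return (uint64_t)((payment + 0xffff) >> 16);  // divide by 2^16 with ceiling
}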
+ * @param is_masterchain A flag indicating if the account is in the masterchain. + * + * @returns The computed storage fees as RefInt256. + */ td::RefInt256 StoragePrices::compute_storage_fees(ton::UnixTime now, const std::vector& pricing, const vm::CellStorageStat& storage_stat, ton::UnixTime last_paid, bool is_special, bool is_masterchain) { if (now <= last_paid || !last_paid || is_special || pricing.empty() || now <= pricing[0].valid_since) { - return {}; + return td::zero_refint(); } std::size_t n = pricing.size(), i = n; while (i && pricing[i - 1].valid_since > last_paid) { @@ -509,11 +674,30 @@ td::RefInt256 StoragePrices::compute_storage_fees(ton::UnixTime now, const std:: return td::rshift(total, 16, 1); // divide by 2^16 with ceil rounding to obtain nanograms } +/** + * Computes the storage fees for the account. + * + * @param now The current Unix time. + * @param pricing The vector of storage prices. + * + * @returns The computed storage fees as RefInt256. + */ td::RefInt256 Account::compute_storage_fees(ton::UnixTime now, const std::vector& pricing) const { return StoragePrices::compute_storage_fees(now, pricing, storage_stat, last_paid, is_special, is_masterchain()); } namespace transaction { +/** + * Constructs a new Transaction object. + * + * @param _account The Account object. + * @param ttype The type of the transaction (see transaction.cpp#309). + * @param req_start_lt The minimal logical time of the transaction. + * @param _now The current Unix time. + * @param _inmsg The input message that caused the transaction. + * + * @returns None + */ Transaction::Transaction(const Account& _account, int ttype, ton::LogicalTime req_start_lt, ton::UnixTime _now, Ref _inmsg) : trans_type(ttype) @@ -540,6 +724,14 @@ Transaction::Transaction(const Account& _account, int ttype, ton::LogicalTime re } } +/** + * Unpacks the input message of a transaction. + * + * @param ihr_delivered A boolean indicating whether the message was delivered using IHR (Instant Hypercube Routing). + * @param cfg Action phase configuration. + * + * @returns A boolean indicating whether the unpacking was successful. + */ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* cfg) { if (in_msg.is_null() || in_msg_type) { return false; @@ -680,11 +872,20 @@ bool Transaction::unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* return true; } +/** + * Prepares the storage phase of a transaction. + * + * @param cfg The configuration for the storage phase. + * @param force_collect Flag indicating whether to collect fees for frozen accounts. + * @param adjust_msg_value Flag indicating whether to adjust the message value if the account balance becomes less than the message balance. + * + * @returns True if the storage phase was successfully prepared, false otherwise. 
+ */ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool force_collect, bool adjust_msg_value) { if (now < account.last_paid) { return false; } - auto to_pay = account.compute_storage_fees(now, *(cfg.pricing)); + auto to_pay = account.compute_storage_fees(now, *(cfg.pricing)) + due_payment; if (to_pay.not_null() && sgn(to_pay) < 0) { return false; } @@ -697,7 +898,7 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc res->fees_collected = to_pay; res->fees_due = td::zero_refint(); balance -= std::move(to_pay); - } else if (acc_status == Account::acc_frozen && !force_collect && to_pay + due_payment < cfg.delete_due_limit) { + } else if (acc_status == Account::acc_frozen && !force_collect && to_pay < cfg.delete_due_limit) { // do not collect fee res->last_paid_updated = (res->is_special ? 0 : account.last_paid); res->fees_collected = res->fees_due = td::zero_refint(); @@ -706,7 +907,7 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc res->fees_due = std::move(to_pay) - std::move(balance.grams); balance.grams = td::zero_refint(); if (!res->is_special) { - auto total_due = res->fees_due + due_payment; + auto total_due = res->fees_due; switch (acc_status) { case Account::acc_uninit: case Account::acc_frozen: @@ -743,12 +944,25 @@ bool Transaction::prepare_storage_phase(const StoragePhaseConfig& cfg, bool forc return true; } +/** + * Prepares the credit phase of a transaction. + * + * This function creates a CreditPhase object and performs the necessary calculations + * to determine the amount to be credited in the credit phase. It updates the due payment, + * credit, balance, and total fees accordingly. + * + * @returns True if the credit phase is prepared successfully, false otherwise. + */ bool Transaction::prepare_credit_phase() { credit_phase = std::make_unique(); - auto collected = std::min(msg_balance_remaining.grams, due_payment); - credit_phase->due_fees_collected = collected; - due_payment -= collected; - credit_phase->credit = msg_balance_remaining -= collected; + // Due payment is only collected in storage phase. + // For messages with bounce flag, contract always receives the amount specified in message + // auto collected = std::min(msg_balance_remaining.grams, due_payment); + // credit_phase->due_fees_collected = collected; + // due_payment -= collected; + // credit_phase->credit = msg_balance_remaining -= collected; + credit_phase->due_fees_collected = td::zero_refint(); + credit_phase->credit = msg_balance_remaining; if (!msg_balance_remaining.is_valid()) { LOG(ERROR) << "cannot compute the amount to be credited in the credit phase of transaction"; return false; @@ -759,17 +973,35 @@ bool Transaction::prepare_credit_phase() { LOG(ERROR) << "cannot credit currency collection to account"; return false; } - total_fees += std::move(collected); + // total_fees += std::move(collected); return true; } } // namespace transaction +/** + * Parses the gas limits and prices from a given cell. + * + * @param cell The cell containing the gas limits and prices serialized using GasLimitsPricing TLB-scheme. + * @param freeze_due_limit Reference to store the freeze due limit. + * @param delete_due_limit Reference to store the delete due limit. + * + * @returns True if the parsing is successful, false otherwise. 
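// Toy comparison of the behavioural change above (plain integers instead of the real
// td::RefInt256 / CurrencyCollection types): the accumulated debt (due_payment) is now
// charged together with the storage fee in the storage phase, and the credit phase
// passes the incoming message value through untouched.
#include <algorithm>
#include <cstdint>

struct Charge { uint64_t storage_to_pay; uint64_t credited_to_account; };

Charge old_behaviour(uint64_t storage_fee, uint64_t due, uint64_t msg_value) {
  uint64_t taken_from_msg = std::min(msg_value, due);  // credit phase used to collect part of the debt
  return {storage_fee, msg_value - taken_from_msg};
}

Charge new_behaviour(uint64_t storage_fee, uint64_t due, uint64_t msg_value) {
  return {storage_fee + due, msg_value};               // debt is charged in the storage phase only
}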
+ */ bool ComputePhaseConfig::parse_GasLimitsPrices(Ref cell, td::RefInt256& freeze_due_limit, td::RefInt256& delete_due_limit) { return cell.not_null() && parse_GasLimitsPrices(vm::load_cell_slice_ref(std::move(cell)), freeze_due_limit, delete_due_limit); } +/** + * Parses the gas limits and prices from a given cell slice. + * + * @param cs The cell slice containing the gas limits and prices serialized using GasLimitsPricing TLB-scheme. + * @param freeze_due_limit Reference to store the freeze due limit. + * @param delete_due_limit Reference to store the delete due limit. + * + * @returns True if the parsing is successful, false otherwise. + */ bool ComputePhaseConfig::parse_GasLimitsPrices(Ref cs, td::RefInt256& freeze_due_limit, td::RefInt256& delete_due_limit) { if (cs.is_null()) { @@ -784,6 +1016,17 @@ bool ComputePhaseConfig::parse_GasLimitsPrices(Ref cs, td::RefInt } } +/** + * Parses the gas limits and prices from a gas limits and prices record. + * + * @param cs The cell slice containing the gas limits and prices serialized using GasLimitsPricing TLB-scheme. + * @param freeze_due_limit A reference to store the freeze due limit. + * @param delete_due_limit A reference to store the delete due limit. + * @param _flat_gas_limit The flat gas limit. + * @param _flat_gas_price The flat gas price. + * + * @returns True if the parsing is successful, false otherwise. + */ bool ComputePhaseConfig::parse_GasLimitsPrices_internal(Ref cs, td::RefInt256& freeze_due_limit, td::RefInt256& delete_due_limit, td::uint64 _flat_gas_limit, td::uint64 _flat_gas_price) { @@ -812,6 +1055,14 @@ bool ComputePhaseConfig::parse_GasLimitsPrices_internal(Ref cs, t return true; } +/** + * Checks if an address is suspended according to the ConfigParam(44). + * + * @param wc The workchain ID. + * @param addr The account address address. + * + * @returns True if the address is suspended, False otherwise. + */ bool ComputePhaseConfig::is_address_suspended(ton::WorkchainId wc, td::Bits256 addr) const { if (!suspended_addresses) { return false; @@ -826,16 +1077,42 @@ bool ComputePhaseConfig::is_address_suspended(ton::WorkchainId wc, td::Bits256 a } } -void ComputePhaseConfig::compute_threshold() { - gas_price256 = td::make_refint(gas_price); +/** + * Computes the maximum gas fee based on the gas prices and limits. + * + * @param gas_price256 The gas price from config as RefInt256 + * @param gas_limit The gas limit from config + * @param flat_gas_limit The flat gas limit from config + * @param flat_gas_price The flat gas price from config + * + * @returns The maximum gas fee. + */ +static td::RefInt256 compute_max_gas_threshold(const td::RefInt256& gas_price256, td::uint64 gas_limit, + td::uint64 flat_gas_limit, td::uint64 flat_gas_price) { if (gas_limit > flat_gas_limit) { - max_gas_threshold = - td::rshift(gas_price256 * (gas_limit - flat_gas_limit), 16, 1) + td::make_bigint(flat_gas_price); + return td::rshift(gas_price256 * (gas_limit - flat_gas_limit), 16, 1) + td::make_bigint(flat_gas_price); } else { - max_gas_threshold = td::make_refint(flat_gas_price); + return td::make_refint(flat_gas_price); } } +/** + * Computes the maximum for gas fee based on the gas prices and limits. + * + * Updates max_gas_threshold. + */ +void ComputePhaseConfig::compute_threshold() { + gas_price256 = td::make_refint(gas_price); + max_gas_threshold = compute_max_gas_threshold(gas_price256, gas_limit, flat_gas_limit, flat_gas_price); +} + +/** + * Computes the amount of gas that can be bought for a given amount of nanograms. 
+ * + * @param nanograms The amount of nanograms to compute gas for. + * + * @returns The amount of gas. + */ td::uint64 ComputePhaseConfig::gas_bought_for(td::RefInt256 nanograms) const { if (nanograms.is_null() || sgn(nanograms) < 0) { return 0; @@ -850,36 +1127,121 @@ td::uint64 ComputePhaseConfig::gas_bought_for(td::RefInt256 nanograms) const { return res->to_long() + flat_gas_limit; } +/** + * Computes the gas price. + * + * @param gas_used The amount of gas used. + * + * @returns The computed gas price. + */ td::RefInt256 ComputePhaseConfig::compute_gas_price(td::uint64 gas_used) const { return gas_used <= flat_gas_limit ? td::make_refint(flat_gas_price) : td::rshift(gas_price256 * (gas_used - flat_gas_limit), 16, 1) + flat_gas_price; } namespace transaction { + +/** + * Checks if it is required to increase gas_limit (from GasLimitsPrices config) to special_gas_limit * 2 + * from masterchain GasLimitsPrices config for the transaction. + * + * In January 2024 a highload wallet of @wallet Telegram bot in mainnet was stuck because current gas limit (1M) is + * not enough to clean up old queires, thus locking funds inside. + * See comment in crypto/smartcont/highload-wallet-v2-code.fc for details on why this happened. + * Account address: EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu + * It was proposed to validators to increase gas limit for this account for a limited amount of time (until 2024-02-29). + * It is activated by setting global version to 5 in ConfigParam 8. + * This config change also activates new behavior for special accounts in masterchain. + * + * @param cfg The compute phase configuration. + * @param now The Unix time of the transaction. + * @param account The account of the transaction. + * + * @returns True if gas_limit override is required, false otherwise + */ +static bool override_gas_limit(const ComputePhaseConfig& cfg, ton::UnixTime now, const Account& account) { + if (!cfg.special_gas_full) { + return false; + } + ton::UnixTime until = 1709164800; // 2024-02-29 00:00:00 UTC + ton::WorkchainId wc = 0; + const char* addr_hex = "FFBFD8F5AE5B2E1C7C3614885CB02145483DFAEE575F0DD08A72C366369211CD"; + return now < until && account.workchain == wc && account.addr.to_hex() == addr_hex; +} + +/** + * Computes the amount of gas that can be bought for a given amount of nanograms. + * Usually equal to `cfg.gas_bought_for(nanograms)` + * However, it overrides gas_limit from config in special cases. + * + * @param cfg The compute phase configuration. + * @param nanograms The amount of nanograms to compute gas for. + * + * @returns The amount of gas. + */ +td::uint64 Transaction::gas_bought_for(const ComputePhaseConfig& cfg, td::RefInt256 nanograms) { + if (override_gas_limit(cfg, now, account)) { + gas_limit_overridden = true; + // Same as ComputePhaseConfig::gas_bought for, but with other gas_limit and max_gas_threshold + auto gas_limit = cfg.mc_gas_prices.special_gas_limit * 2; + auto max_gas_threshold = + compute_max_gas_threshold(cfg.gas_price256, gas_limit, cfg.flat_gas_limit, cfg.flat_gas_price); + if (nanograms.is_null() || sgn(nanograms) < 0) { + return 0; + } + if (nanograms >= max_gas_threshold) { + return gas_limit; + } + if (nanograms < cfg.flat_gas_price) { + return 0; + } + auto res = td::div((std::move(nanograms) - cfg.flat_gas_price) << 16, cfg.gas_price256); + return res->to_long() + cfg.flat_gas_limit; + } + return cfg.gas_bought_for(nanograms); +} + +/** + * Computes the gas limits for a transaction. 
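// Sketch of the gas_bought_for() arithmetic above using plain integers instead of
// td::RefInt256, assuming gas_limit > flat_gas_limit (the other branch is handled by
// compute_max_gas_threshold() in the real code):
#include <cstdint>

uint64_t gas_bought_for_example(uint64_t nanograms, uint64_t gas_limit, uint64_t gas_price,
                                uint64_t flat_gas_limit, uint64_t flat_gas_price) {
  // max_gas_threshold = ceil(gas_price * (gas_limit - flat_gas_limit) / 2^16) + flat_gas_price
  unsigned __int128 threshold =
      (((unsigned __int128)gas_price * (gas_limit - flat_gas_limit) + 0xffff) >> 16) +
      flat_gas_price;
  if (nanograms >= threshold) {
    return gas_limit;          // enough funds to buy the whole limit
  }
  if (nanograms < flat_gas_price) {
    return 0;                  // cannot even cover the flat price
  }
  return (uint64_t)(((unsigned __int128)(nanograms - flat_gas_price) << 16) / gas_price) +
         flat_gas_limit;
}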
+ * + * @param cp The ComputePhase object to store the computed gas limits. + * @param cfg The compute phase configuration. + * + * @returns True if the gas limits were successfully computed, false otherwise. + */ bool Transaction::compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& cfg) { // Compute gas limits if (account.is_special) { cp.gas_max = cfg.special_gas_limit; } else { - cp.gas_max = cfg.gas_bought_for(balance.grams); + cp.gas_max = gas_bought_for(cfg, balance.grams); } - cp.gas_credit = 0; - if (trans_type != tr_ord) { + if (trans_type != tr_ord || (account.is_special && cfg.special_gas_full)) { // may use all gas that can be bought using remaining balance cp.gas_limit = cp.gas_max; } else { // originally use only gas bought using remaining message balance // if the message is "accepted" by the smart contract, the gas limit will be set to gas_max - cp.gas_limit = std::min(cfg.gas_bought_for(msg_balance_remaining.grams), cp.gas_max); - if (!block::tlb::t_Message.is_internal(in_msg)) { - // external messages carry no balance, give them some credit to check whether they are accepted - cp.gas_credit = std::min(cfg.gas_credit, cp.gas_max); - } + cp.gas_limit = std::min(gas_bought_for(cfg, msg_balance_remaining.grams), cp.gas_max); + } + if (trans_type == tr_ord && !block::tlb::t_Message.is_internal(in_msg)) { + // external messages carry no balance, give them some credit to check whether they are accepted + cp.gas_credit = std::min(cfg.gas_credit, cp.gas_max); + } else { + cp.gas_credit = 0; } LOG(DEBUG) << "gas limits: max=" << cp.gas_max << ", limit=" << cp.gas_limit << ", credit=" << cp.gas_credit; return true; } +/** + * Prepares a TVM stack for a transaction. + * + * @param cp The compute phase object. + * + * @returns A reference to the prepared virtual machine stack. + * Returns an empty reference if the transaction type is invalid. + */ Ref Transaction::prepare_vm_stack(ComputePhase& cp) { Ref stack_ref{true}; td::RefInt256 acc_addr{true}; @@ -906,6 +1268,14 @@ Ref Transaction::prepare_vm_stack(ComputePhase& cp) { } } +/** + * Prepares a random seed for a transaction. + * + * @param rand_seed The output random seed. + * @param cfg The configuration for the compute phase. + * + * @returns True if the random seed was successfully prepared, false otherwise. + */ bool Transaction::prepare_rand_seed(td::BitArray<256>& rand_seed, const ComputePhaseConfig& cfg) const { // we might use SHA256(block_rand_seed . addr . trans_lt) // instead, we use SHA256(block_rand_seed . addr) @@ -918,6 +1288,15 @@ bool Transaction::prepare_rand_seed(td::BitArray<256>& rand_seed, const ComputeP return true; } +/** + * Prepares the c7 tuple (virtual machine context) for a compute phase of a transaction. + * + * @param cfg The configuration for the compute phase. + * + * @returns A reference to a Tuple object. + * + * @throws CollatorError if the rand_seed cannot be computed for the transaction. + */ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { td::BitArray<256> rand_seed; td::RefInt256 rand_seed_int{true}; @@ -947,17 +1326,34 @@ Ref Transaction::prepare_vm_c7(const ComputePhaseConfig& cfg) const { } tuple.push_back(storage_phase->fees_collected); // storage_fees:Integer - // See crypto/block/mc-config.cpp#2115 (get_prev_blocks_info) + // See crypto/block/mc-config.cpp#2223 (get_prev_blocks_info) // [ wc:Integer shard:Integer seqno:Integer root_hash:Integer file_hash:Integer] = BlockId; // [ last_mc_blocks:[BlockId...] 
// prev_key_block:BlockId ] : PrevBlocksInfo + // The only context where PrevBlocksInfo (13 parameter of c7) is null is inside emulator + // where it need to be set via transaction_emulator_set_prev_blocks_info (see emulator/emulator-extern.cpp) + // Inside validator, collator and liteserver checking external message contexts + // prev_blocks_info is always not null, since get_prev_blocks_info() + // may only return tuple or raise Error (See crypto/block/mc-config.cpp#2223) tuple.push_back(cfg.prev_blocks_info.not_null() ? vm::StackEntry(cfg.prev_blocks_info) : vm::StackEntry()); } + if (cfg.global_version >= 6) { + tuple.push_back(cfg.unpacked_config_tuple.not_null() ? vm::StackEntry(cfg.unpacked_config_tuple) + : vm::StackEntry()); // unpacked_config_tuple:[...] + tuple.push_back(due_payment.not_null() ? due_payment : td::zero_refint()); // due_payment:Integer + } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); LOG(DEBUG) << "SmartContractInfo initialized with " << vm::StackEntry(tuple_ref).to_string(); return vm::make_tuple_ref(std::move(tuple_ref)); } +/** + * Computes the number of output actions in a list. + * + * @param list c5 cell. + * + * @returns The number of output actions. + */ int output_actions_count(Ref list) { int i = -1; do { @@ -972,7 +1368,16 @@ int output_actions_count(Ref list) { return i; } -bool Transaction::unpack_msg_state(bool lib_only) { +/** + * Unpacks the message StateInit. + * + * @param cfg The configuration for the compute phase. + * @param lib_only If true, only unpack libraries from the state. + * @param forbid_public_libs Don't allow public libraries in initstate. + * + * @returns True if the unpacking is successful, false otherwise. + */ +bool Transaction::unpack_msg_state(const ComputePhaseConfig& cfg, bool lib_only, bool forbid_public_libs) { block::gen::StateInit::Record state; if (in_msg_state.is_null() || !tlb::unpack_cell(in_msg_state, state)) { LOG(ERROR) << "cannot unpack StateInit from an inbound message"; @@ -996,12 +1401,32 @@ bool Transaction::unpack_msg_state(bool lib_only) { new_tock = z & 1; LOG(DEBUG) << "tick=" << new_tick << ", tock=" << new_tock; } + td::Ref old_code = new_code, old_data = new_data, old_library = new_library; new_code = state.code->prefetch_ref(); new_data = state.data->prefetch_ref(); new_library = state.library->prefetch_ref(); + auto size_limits = cfg.size_limits; + if (forbid_public_libs) { + size_limits.max_acc_public_libraries = 0; + } + auto S = check_state_limits(size_limits, false); + if (S.is_error()) { + LOG(DEBUG) << "Cannot unpack msg state: " << S.move_as_error(); + new_code = old_code; + new_data = old_data; + new_library = old_library; + return false; + } return true; } +/** + * Computes the set of libraries to be used during TVM execution. + * + * @param cfg The configuration for the compute phase. + * + * @returns A vector of hashmaps with libraries. + */ std::vector> Transaction::compute_vm_libraries(const ComputePhaseConfig& cfg) { std::vector> lib_set; if (in_msg_library.not_null()) { @@ -1017,6 +1442,11 @@ std::vector> Transaction::compute_vm_libraries(const ComputePhaseC return lib_set; } +/** + * Checks if the input message StateInit hash corresponds to the account address. + * + * @returns True if the input message state hash is valid, False otherwise. 
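// unpack_msg_state() above now validates the incoming StateInit against the size limits
// and rolls new_code/new_data/new_library back when the check fails. A generic sketch of
// that save/validate/restore idiom (illustration only):
#include <utility>

template <class T, class Validate>
bool assign_if_valid(T& slot, T candidate, Validate&& is_valid) {
  T saved = slot;                // remember the previous value
  slot = std::move(candidate);   // tentatively apply the new one
  if (!is_valid(slot)) {
    slot = std::move(saved);     // validation failed: restore the old value
    return false;
  }
  return true;
}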
+ */ bool Transaction::check_in_msg_state_hash() { CHECK(in_msg_state.not_null()); CHECK(new_split_depth >= 0 && new_split_depth < 32); @@ -1030,6 +1460,13 @@ bool Transaction::check_in_msg_state_hash() { return account.recompute_tmp_addr(my_addr, d, orig_addr_rewrite.bits()); } +/** + * Prepares the compute phase of a transaction, which includes running TVM. + * + * @param cfg The configuration for the compute phase. + * + * @returns True if the compute phase was successfully prepared and executed, false otherwise. + */ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { // TODO: add more skip verifications + sometimes use state from in_msg to re-activate // ... @@ -1067,7 +1504,9 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { return true; } use_msg_state = true; - if (!(unpack_msg_state() && account.check_split_depth(new_split_depth))) { + bool forbid_public_libs = + acc_status == Account::acc_uninit && account.is_masterchain(); // Forbid for deploying, allow for unfreezing + if (!(unpack_msg_state(cfg, false, forbid_public_libs) && account.check_split_depth(new_split_depth))) { LOG(DEBUG) << "cannot unpack in_msg_state, or it has bad split_depth; cannot init account state"; cp.skip_reason = ComputePhase::sk_bad_state; return true; @@ -1082,7 +1521,7 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { cp.skip_reason = in_msg_state.not_null() ? ComputePhase::sk_bad_state : ComputePhase::sk_no_state; return true; } else if (in_msg_state.not_null()) { - unpack_msg_state(true); // use only libraries + unpack_msg_state(cfg, true); // use only libraries } if (in_msg_extern && in_msg_state.not_null() && account.addr != in_msg_state->get_hash().bits()) { LOG(DEBUG) << "in_msg_state hash mismatch in external message"; @@ -1125,6 +1564,7 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { vm.set_global_version(cfg.global_version); vm.set_c7(prepare_vm_c7(cfg)); // tuple with SmartContractInfo vm.set_chksig_always_succeed(cfg.ignore_chksig); + vm.set_stop_on_accept_message(cfg.stop_on_accept_message); // vm.incr_stack_trace(1); // enable stack dump after each step LOG(DEBUG) << "starting VM"; @@ -1188,6 +1628,13 @@ bool Transaction::prepare_compute_phase(const ComputePhaseConfig& cfg) { return true; } +/** + * Prepares the action phase of a transaction. + * + * @param cfg The configuration for the action phase. + * + * @returns True if the action phase was prepared successfully, false otherwise. + */ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { if (!compute_phase || !compute_phase->success) { return false; @@ -1212,7 +1659,7 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { if (account.is_special) { return true; } - auto S = check_state_limits(cfg); + auto S = check_state_limits(cfg.size_limits); if (S.is_error()) { // Rollback changes to state, fail action phase LOG(INFO) << "Account state size exceeded limits: " << S.move_as_error(); @@ -1358,6 +1805,15 @@ bool Transaction::prepare_action_phase(const ActionPhaseConfig& cfg) { return true; } +/** + * Tries to set the code for an account. + * + * @param cs The CellSlice containing the action data serialized as action_set_code TLB-scheme. + * @param ap The action phase object. + * @param cfg The action phase configuration. + * + * @returns 0 if the code was successfully set, -1 otherwise. 
+ */ int Transaction::try_action_set_code(vm::CellSlice& cs, ActionPhase& ap, const ActionPhaseConfig& cfg) { block::gen::OutAction::Record_action_set_code rec; if (!tlb::unpack_exact(cs, rec)) { @@ -1369,6 +1825,19 @@ int Transaction::try_action_set_code(vm::CellSlice& cs, ActionPhase& ap, const A return 0; } +/** + * Tries to change the library in the transaction. + * + * @param cs The cell slice containing the action data serialized as action_change_library TLB-scheme. + * @param ap The action phase object. + * @param cfg The action phase configuration. + * + * @returns 0 if the action was successfully performed, + * -1 if there was an error unpacking the data or the mode is invalid, + * 41 if the library reference is required but is null, + * 43 if the number of cells in the library exceeds the limit, + * 42 if there was a VM error during the operation. + */ int Transaction::try_action_change_library(vm::CellSlice& cs, ActionPhase& ap, const ActionPhaseConfig& cfg) { block::gen::OutAction::Record_action_change_library rec; if (!tlb::unpack_exact(cs, rec)) { @@ -1435,9 +1904,18 @@ int Transaction::try_action_change_library(vm::CellSlice& cs, ActionPhase& ap, c } } // namespace transaction -// msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms -// ihr_fwd_fees = ceil((msg_fwd_fees * ihr_price_factor)/2^16) nanograms -// bits in the root cell of a message are not included in msg.bits (lump_price pays for them) +/** + * Computes the forward fees for a message based on the number of cells and bits. + * + * msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms + * ihr_fwd_fees = ceil((msg_fwd_fees * ihr_price_factor)/2^16) nanograms + * bits in the root cell of a message are not included in msg.bits (lump_price pays for them) + * + * @param cells The number of cells in the message. + * @param bits The number of bits in the message. + * + * @returns The computed forward fees for the message. + */ td::uint64 MsgPrices::compute_fwd_fees(td::uint64 cells, td::uint64 bits) const { return lump_price + td::uint128(bit_price) .mult(bits) @@ -1447,6 +1925,34 @@ td::uint64 MsgPrices::compute_fwd_fees(td::uint64 cells, td::uint64 bits) const .lo(); } +/** + * Computes the forward fees for a message based on the number of cells and bits. + * Return the result as td::RefInt256 + * + * msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms + * ihr_fwd_fees = ceil((msg_fwd_fees * ihr_price_factor)/2^16) nanograms + * bits in the root cell of a message are not included in msg.bits (lump_price pays for them) + * + * @param cells The number of cells in the message. + * @param bits The number of bits in the message. + * + * @returns The computed forward fees for the message as td::RefInt256j. + */ +td::RefInt256 MsgPrices::compute_fwd_fees256(td::uint64 cells, td::uint64 bits) const { + return td::make_refint(lump_price) + + td::rshift(td::make_refint(bit_price) * bits + td::make_refint(cell_price) * cells, 16, + 1); // divide by 2^16 with ceil rounding +} + +/** + * Computes the forward fees and IHR fees for a message with the given number of cells and bits. + * + * @param cells The number of cells. + * @param bits The number of bits. + * @param ihr_disabled Flag indicating whether IHR is disabled. + * + * @returns A pair of values representing the forward fees and IHR fees. 
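// Sketch of the forward-fee formula documented above, with plain integers; the new
// compute_fwd_fees256() returns td::RefInt256 so the same formula cannot overflow for
// extreme cell/bit counts:
//   msg_fwd_fees = lump_price + ceil((bit_price * bits + cell_price * cells) / 2^16)
#include <cstdint>

uint64_t fwd_fees_example(uint64_t lump_price, uint64_t bit_price, uint64_t cell_price,
                          uint64_t cells, uint64_t bits) {
  unsigned __int128 weighted =
      (unsigned __int128)bit_price * bits + (unsigned __int128)cell_price * cells;
  return lump_price + (uint64_t)((weighted + 0xffff) >> 16);  // ceil-divide by 2^16
}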
+ */ std::pair MsgPrices::compute_fwd_ihr_fees(td::uint64 cells, td::uint64 bits, bool ihr_disabled) const { td::uint64 fwd = compute_fwd_fees(cells, bits); @@ -1456,19 +1962,47 @@ std::pair MsgPrices::compute_fwd_ihr_fees(td::uint64 cel return std::pair(fwd, td::uint128(fwd).mult(ihr_factor).shr(16).lo()); } +/** + * Computes the part of the fees that goes to the total fees of the current block. + * + * @param total The amount of fees. + * + * @returns The part of the fees that goes to the total fees of the current block. + */ td::RefInt256 MsgPrices::get_first_part(td::RefInt256 total) const { return (std::move(total) * first_frac) >> 16; } +/** + * Computes the part of the fees that goes to the total fees of the current block. + * + * @param total The amount of fees. + * + * @returns The part of the fees that goes to the total fees of the current block. + */ td::uint64 MsgPrices::get_first_part(td::uint64 total) const { return td::uint128(total).mult(first_frac).shr(16).lo(); } +/** + * Computes the part of the fees that goes to the total fees of the transit block. + * + * @param total The amount of fees. + * + * @returns The part of the fees that goes to the total fees of the transit block. + */ td::RefInt256 MsgPrices::get_next_part(td::RefInt256 total) const { return (std::move(total) * next_frac) >> 16; } namespace transaction { +/** + * Checks if the source address is addr_none and replaces it with the account address. + * + * @param src_addr A reference to the source address of the message. + * + * @returns True if the source address is addr_none or is equal to the account address. + */ bool Transaction::check_replace_src_addr(Ref& src_addr) const { int t = (int)src_addr->prefetch_ulong(2); if (!t && src_addr->size_ext() == 2) { @@ -1489,6 +2023,15 @@ bool Transaction::check_replace_src_addr(Ref& src_addr) const { return false; } +/** + * Checks the destination address of a message and rewrites it if it is an anycast address. + * + * @param dest_addr A reference to the destination address of the transaction. + * @param cfg The configuration for the action phase. + * @param is_mc A pointer to a boolean that will be set to whether the destination is in the masterchain. + * + * @returns True if the destination address is valid, false otherwise. + */ bool Transaction::check_rewrite_dest_addr(Ref& dest_addr, const ActionPhaseConfig& cfg, bool* is_mc) const { if (!dest_addr->prefetch_ulong(1)) { @@ -1551,11 +2094,6 @@ bool Transaction::check_rewrite_dest_addr(Ref& dest_addr, const A } if (rec.anycast->size() > 1) { // destination address is an anycast - if (rec.workchain_id == ton::masterchainId) { - // anycast addresses disabled in masterchain - LOG(DEBUG) << "masterchain destination address has an anycast field"; - return false; - } vm::CellSlice cs{*rec.anycast}; int d = (int)cs.fetch_ulong(6) - 32; if (d <= 0 || d > 30) { @@ -1595,6 +2133,17 @@ bool Transaction::check_rewrite_dest_addr(Ref& dest_addr, const A return true; } +/** + * Tries to send a message. + * + * @param cs0 The cell slice containing the action data serialized as action_send_msg TLB-scheme. + * @param ap The action phase. + * @param cfg The action phase configuration. + * @param redoing The index of the attempt, starting from 0. On later attempts it tries to move the message body and StateInit to separate cells. + * + * @returns 0 if the message is successfully sent or if the error may be ignored, error code otherwise. + * Returns -2 if the action should be attempted again.
+ */ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, const ActionPhaseConfig& cfg, int redoing) { block::gen::OutAction::Record_action_send_msg act_rec; @@ -1990,6 +2539,15 @@ int Transaction::try_action_send_msg(const vm::CellSlice& cs0, ActionPhase& ap, return 0; } +/** + * Tries to reserve a currency in the action phase. + * + * @param cs The cell slice containing the action data serialized as action_reserve_currency TLB-scheme. + * @param ap The action phase. + * @param cfg The action phase configuration. + * + * @returns 0 if the currency is successfully reserved, error code otherwise. + */ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap, const ActionPhaseConfig& cfg) { block::gen::OutAction::Record_action_reserve_currency rec; if (!tlb::unpack_exact(cs, rec)) { @@ -2060,7 +2618,62 @@ int Transaction::try_action_reserve_currency(vm::CellSlice& cs, ActionPhase& ap, return 0; } -td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { +/** + * Calculates the number of public libraries in the dictionary. + * + * @param libraries The dictionary of account libraries. + * + * @returns The number of public libraries in the dictionary. + */ +static td::uint32 get_public_libraries_count(const td::Ref& libraries) { + td::uint32 count = 0; + vm::Dictionary dict{libraries, 256}; + dict.check_for_each([&](td::Ref value, td::ConstBitPtr key, int) { + if (block::is_public_library(key, std::move(value))) { + ++count; + } + return true; + }); + return count; +} + +/** + * Calculates the number of public library changes between two library dictionaries. + * + * @param old_libraries The dictionary of account libraries before the transaction. + * @param new_libraries The dictionary of account libraries after the transaction. + * + * @returns The number of changed public libraries. + */ +static td::uint32 get_public_libraries_diff_count(const td::Ref& old_libraries, + const td::Ref& new_libraries) { + td::uint32 count = 0; + vm::Dictionary dict1{old_libraries, 256}; + vm::Dictionary dict2{new_libraries, 256}; + dict1.scan_diff(dict2, [&](td::ConstBitPtr key, int n, Ref val1, Ref val2) -> bool { + CHECK(n == 256); + bool is_public1 = val1.not_null() && block::is_public_library(key, val1); + bool is_public2 = val2.not_null() && block::is_public_library(key, val2); + if (is_public1 != is_public2) { + ++count; + } + return true; + }); + return count; +} + +/** + * Checks that the new account state fits in the limits. + * This function is not called for special accounts. + * + * @param size_limits The size limits configuration. + * @param update_storage_stat Whether to store the storage stat in the Transaction's CellStorageStat. + * + * @returns A `td::Status` indicating the result of the check. + * - If the state limits are within the allowed range, returns OK. + * - If the state limits exceed the maximum allowed range, returns an error.
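+ * - For masterchain accounts, an error is also returned if the number of public libraries in the new state exceeds size_limits.max_acc_public_libraries (the "too many public libraries" check below).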
+ */ +td::Status Transaction::check_state_limits(const SizeLimitsConfig& size_limits, bool update_storage_stat) { auto cell_equal = [](const td::Ref& a, const td::Ref& b) -> bool { if (a.is_null()) { return b.is_null(); @@ -2074,13 +2687,13 @@ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { cell_equal(account.library, new_library)) { return td::Status::OK(); } - // new_storage_stat is used here beause these stats will be reused in compute_state() - new_storage_stat.limit_cells = cfg.size_limits.max_acc_state_cells; - new_storage_stat.limit_bits = cfg.size_limits.max_acc_state_bits; + vm::CellStorageStat storage_stat; + storage_stat.limit_cells = size_limits.max_acc_state_cells; + storage_stat.limit_bits = size_limits.max_acc_state_bits; td::Timer timer; auto add_used_storage = [&](const td::Ref& cell) -> td::Status { if (cell.not_null()) { - TRY_RESULT(res, new_storage_stat.add_used_storage(cell)); + TRY_RESULT(res, storage_stat.add_used_storage(cell)); if (res.max_merkle_depth > max_allowed_merkle_depth) { return td::Status::Error("too big merkle depth"); } @@ -2094,16 +2707,33 @@ td::Status Transaction::check_state_limits(const ActionPhaseConfig& cfg) { LOG(INFO) << "Compute used storage took " << timer.elapsed() << "s"; } if (acc_status == Account::acc_active) { - new_storage_stat.clear_limit(); + storage_stat.clear_limit(); } else { - new_storage_stat.clear(); + storage_stat.clear(); } - return new_storage_stat.cells <= cfg.size_limits.max_acc_state_cells && - new_storage_stat.bits <= cfg.size_limits.max_acc_state_bits - ? td::Status::OK() - : td::Status::Error("state too big"); + td::Status res; + if (storage_stat.cells > size_limits.max_acc_state_cells || storage_stat.bits > size_limits.max_acc_state_bits) { + res = td::Status::Error(PSTRING() << "account state is too big"); + } else if (account.is_masterchain() && !cell_equal(account.library, new_library) && + get_public_libraries_count(new_library) > size_limits.max_acc_public_libraries) { + res = td::Status::Error("too many public libraries"); + } else { + res = td::Status::OK(); + } + if (update_storage_stat) { + // storage_stat will be reused in compute_state() + new_storage_stat = std::move(storage_stat); + } + return res; } +/** + * Prepares the bounce phase of a transaction. + * + * @param cfg The configuration for the action phase. + * + * @returns True if the bounce phase was successfully prepared, false otherwise. + */ bool Transaction::prepare_bounce_phase(const ActionPhaseConfig& cfg) { if (in_msg.is_null() || !bounce_enabled) { return false; @@ -2209,6 +2839,14 @@ bool Transaction::prepare_bounce_phase(const ActionPhaseConfig& cfg) { * */ +/** + * Stores the account status in a CellBuilder object. + * + * @param cb The CellBuilder object to store the account status in. + * @param acc_status The account status to store. + * + * @returns True if the account status was successfully stored, false otherwise. + */ bool Account::store_acc_status(vm::CellBuilder& cb, int acc_status) const { int v; switch (acc_status) { @@ -2231,6 +2869,17 @@ bool Account::store_acc_status(vm::CellBuilder& cb, int acc_status) const { return cb.store_long_bool(v, 2); } +/** + * Tries to update the storage statistics based on the old storage statistics and old account state without fully recomputing it. + * + * It succeeds if only root cell of AccountStorage is changed. + * + * @param old_stat The old storage statistics. + * @param old_cs The old AccountStorage. + * @param new_cell The new AccountStorage. 
+ * + * @returns An optional value of type vm::CellStorageStat. If the update is successful, it returns the new storage statistics. Otherwise, it returns an empty optional. + */ static td::optional try_update_storage_stat(const vm::CellStorageStat& old_stat, td::Ref old_cs, td::Ref new_cell) { @@ -2258,6 +2907,11 @@ static td::optional try_update_storage_stat(const vm::CellS } namespace transaction { +/** + * Computes the new state of the account. + * + * @returns True if the state computation is successful, false otherwise. + */ bool Transaction::compute_state() { if (new_total_state.not_null()) { return true; @@ -2360,6 +3014,13 @@ bool Transaction::compute_state() { return true; } +/** + * Serializes the transaction object using Transaction TLB-scheme. + * + * Updates root. + * + * @returns True if the serialization is successful, False otherwise. + */ bool Transaction::serialize() { if (root.not_null()) { return true; @@ -2444,14 +3105,14 @@ bool Transaction::serialize() { vm::load_cell_slice(root).print_rec(std::cerr); } - if (!block::gen::t_Transaction.validate_ref(root)) { + if (!block::gen::t_Transaction.validate_ref(4096, root)) { LOG(ERROR) << "newly-generated transaction failed to pass automated validation:"; vm::load_cell_slice(root).print_rec(std::cerr); block::gen::t_Transaction.print_ref(std::cerr, root); root.clear(); return false; } - if (!block::tlb::t_Transaction.validate_ref(root)) { + if (!block::tlb::t_Transaction.validate_ref(4096, root)) { LOG(ERROR) << "newly-generated transaction failed to pass hand-written validation:"; vm::load_cell_slice(root).print_rec(std::cerr); block::gen::t_Transaction.print_ref(std::cerr, root); @@ -2462,6 +3123,13 @@ bool Transaction::serialize() { return true; } +/** + * Serializes the storage phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the serialization is successful, false otherwise. + */ bool Transaction::serialize_storage_phase(vm::CellBuilder& cb) { if (!storage_phase) { return false; @@ -2485,6 +3153,13 @@ bool Transaction::serialize_storage_phase(vm::CellBuilder& cb) { return ok; } +/** + * Serializes the credit phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the credit phase was successfully serialized, false otherwise. + */ bool Transaction::serialize_credit_phase(vm::CellBuilder& cb) { if (!credit_phase) { return false; @@ -2494,6 +3169,13 @@ bool Transaction::serialize_credit_phase(vm::CellBuilder& cb) { return block::store_Maybe_Grams_nz(cb, cp.due_fees_collected) && cp.credit.store(cb); } +/** + * Serializes the compute phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the serialization was successful, false otherwise. + */ bool Transaction::serialize_compute_phase(vm::CellBuilder& cb) { if (!compute_phase) { return false; @@ -2536,6 +3218,13 @@ bool Transaction::serialize_compute_phase(vm::CellBuilder& cb) { return ok; } +/** + * Serializes the action phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. + * + * @returns True if the serialization is successful, false otherwise. + */ bool Transaction::serialize_action_phase(vm::CellBuilder& cb) { if (!action_phase) { return false; @@ -2560,6 +3249,13 @@ bool Transaction::serialize_action_phase(vm::CellBuilder& cb) { return ok; } +/** + * Serializes the bounce phase of a transaction. + * + * @param cb The CellBuilder to store the serialized data. 
+ * + * @returns True if the bounce phase was successfully serialized, false otherwise. + */ bool Transaction::serialize_bounce_phase(vm::CellBuilder& cb) { if (!bounce_phase) { return false; @@ -2580,6 +3276,15 @@ bool Transaction::serialize_bounce_phase(vm::CellBuilder& cb) { } } +/** + * Estimates the block storage profile increment if the transaction is added to the block. + * + * @param store_stat The current storage statistics of the block. + * @param usage_tree The usage tree of the block. + * + * @returns The estimated block storage profile increment. + * Returns Error if the transaction is not serialized or if its new state is not computed. + */ td::Result Transaction::estimate_block_storage_profile_incr( const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const { if (root.is_null()) { @@ -2591,33 +3296,30 @@ td::Result Transaction::estimate_block_storage_pro return store_stat.tentative_add_proof(new_total_state, usage_tree) + store_stat.tentative_add_cell(root); } -bool Transaction::update_block_storage_profile(vm::NewCellStorageStat& store_stat, - const vm::CellUsageTree* usage_tree) const { - if (root.is_null() || new_total_state.is_null()) { - return false; - } - store_stat.add_proof(new_total_state, usage_tree); - store_stat.add_cell(root); - return true; -} - -bool Transaction::would_fit(unsigned cls, const block::BlockLimitStatus& blimst) const { - auto res = estimate_block_storage_profile_incr(blimst.st_stat, blimst.limits.usage_tree); - if (res.is_error()) { - LOG(ERROR) << res.move_as_error(); - return false; - } - auto extra = res.move_as_ok(); - return blimst.would_fit(cls, end_lt, gas_used(), &extra); -} - -bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) const { - if (!(blimst.update_lt(end_lt) && blimst.update_gas(gas_used()))) { +/** + * Updates the limits status of a block. + * + * @param blimst The block limit status object to update. + * @param with_size Flag indicating whether to update the size limits. + * + * @returns True if the limits were successfully updated, False otherwise. + */ +bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_gas, bool with_size) const { + if (!(blimst.update_lt(end_lt) && blimst.update_gas(with_gas ? gas_used() : 0))) { return false; } if (with_size) { - return blimst.add_proof(new_total_state) && blimst.add_cell(root) && blimst.add_transaction() && - blimst.add_account(is_first); + if (!(blimst.add_proof(new_total_state) && blimst.add_cell(root) && blimst.add_transaction() && + blimst.add_account(is_first))) { + return false; + } + if (account.is_masterchain()) { + if (was_frozen || was_deleted) { + blimst.public_library_diff += get_public_libraries_count(account.orig_library); + } else { + blimst.public_library_diff += get_public_libraries_diff_count(account.orig_library, new_library); + } + } } return true; } @@ -2628,6 +3330,13 @@ bool Transaction::update_limits(block::BlockLimitStatus& blimst, bool with_size) * */ +/** + * Commits a transaction for a given account. + * + * @param acc The account to commit the transaction for. + * + * @returns A reference to the root cell of the serialized transaction. + */ Ref Transaction::commit(Account& acc) { CHECK(account.last_trans_end_lt_ <= start_lt && start_lt < end_lt); CHECK(root.not_null()); @@ -2672,14 +3381,33 @@ Ref Transaction::commit(Account& acc) { return root; } +/** + * Extracts the output message at the specified index from the transaction. 
+ * + * @param i The index of the output message to extract. + * + * @returns A pair of the logical time and the extracted output message. + */ LtCellRef Transaction::extract_out_msg(unsigned i) { return {start_lt + i + 1, std::move(out_msgs.at(i))}; } +/** + * Extracts the output message at index i from the transaction. + * + * @param i The index of the output message to extract. + * + * @returns A triple of the logical time, the extracted output message and the transaction root. + */ NewOutMsg Transaction::extract_out_msg_ext(unsigned i) { return {start_lt + i + 1, std::move(out_msgs.at(i)), root}; } +/** + * Extracts the outgoing messages from the transaction and adds them to the given list. + * + * @param list The list to which the outgoing messages will be added. + */ void Transaction::extract_out_msgs(std::vector& list) { for (unsigned i = 0; i < out_msgs.size(); i++) { list.emplace_back(start_lt + i + 1, std::move(out_msgs[i])); @@ -2687,10 +3415,23 @@ void Transaction::extract_out_msgs(std::vector& list) { } } // namespace transaction +/** + * Adds a transaction to the account's transaction list. + * + * @param trans_root The root of the transaction cell. + * @param trans_lt The logical time of the transaction. + */ void Account::push_transaction(Ref trans_root, ton::LogicalTime trans_lt) { transactions.emplace_back(trans_lt, std::move(trans_root)); } +/** + * Serializes an account block for the account using AccountBlock TLB-scheme. + * + * @param cb The CellBuilder used to store the serialized data. + * + * @returns True if the account block was successfully created, false otherwise. + */ bool Account::create_account_block(vm::CellBuilder& cb) { if (transactions.empty()) { return false; @@ -2719,6 +3460,11 @@ bool Account::create_account_block(vm::CellBuilder& cb) { && cb.store_ref_bool(cb2.finalize()); // state_update:^(HASH_UPDATE Account) } +/** + * Checks if the libraries stored in the account object have changed. + * + * @returns True if the libraries have changed, False otherwise. + */ bool Account::libraries_changed() const { bool s = orig_library.not_null(); bool t = library.not_null(); @@ -2729,6 +3475,21 @@ bool Account::libraries_changed() const { } } +/** + * Fetches and initializes various configuration parameters from masterchain config for transaction processing. + * + * @param config The masterchain configuration. + * @param old_mparams Pointer to store a dictionary of mandatory parameters (ConfigParam 9). + * @param storage_prices Pointer to store the storage prices. + * @param storage_phase_cfg Pointer to store the storage phase configuration. + * @param rand_seed Pointer to the random seed. Generates a new seed if the value is `td::Bits256::zero()`. + * @param compute_phase_cfg Pointer to store the compute phase configuration. + * @param action_phase_cfg Pointer to store the action phase configuration. + * @param masterchain_create_fee Pointer to store the masterchain create fee. + * @param basechain_create_fee Pointer to store the basechain create fee. + * @param wc The workchain ID. + * @param now The current Unix time. 
+ */ td::Status FetchConfigParams::fetch_config_params( const block::ConfigInfo& config, Ref* old_mparams, std::vector* storage_prices, StoragePhaseConfig* storage_phase_cfg, td::BitArray<256>* rand_seed, ComputePhaseConfig* compute_phase_cfg, @@ -2744,6 +3505,22 @@ td::Status FetchConfigParams::fetch_config_params( basechain_create_fee, wc, now); } +/** + * Fetches and initializes various configuration parameters from masterchain config for transaction processing. + * + * @param config The masterchain configuration. + * @param prev_blocks_info The tuple with information about previous blocks. + * @param old_mparams Pointer to store a dictionary of mandatory parameters (ConfigParam 9). + * @param storage_prices Pointer to store the storage prices. + * @param storage_phase_cfg Pointer to store the storage phase configuration. + * @param rand_seed Pointer to the random seed. Generates a new seed if the value is `td::Bits256::zero()`. + * @param compute_phase_cfg Pointer to store the compute phase configuration. + * @param action_phase_cfg Pointer to store the action phase configuration. + * @param masterchain_create_fee Pointer to store the masterchain create fee. + * @param basechain_create_fee Pointer to store the basechain create fee. + * @param wc The workchain ID. + * @param now The current Unix time. + */ td::Status FetchConfigParams::fetch_config_params( const block::Config& config, td::Ref prev_blocks_info, Ref* old_mparams, std::vector* storage_prices, StoragePhaseConfig* storage_phase_cfg, @@ -2774,6 +3551,10 @@ td::Status FetchConfigParams::fetch_config_params( storage_phase_cfg->delete_due_limit)) { return td::Status::Error(-668, "cannot unpack current gas prices and limits from masterchain configuration"); } + TRY_RESULT_PREFIX(mc_gas_prices, config.get_gas_limits_prices(true), + "cannot unpack masterchain gas prices and limits: "); + compute_phase_cfg->mc_gas_prices = std::move(mc_gas_prices); + compute_phase_cfg->special_gas_full = config.get_global_version() >= 5; storage_phase_cfg->enable_due_payment = config.get_global_version() >= 4; compute_phase_cfg->block_rand_seed = *rand_seed; compute_phase_cfg->max_vm_data_depth = size_limits.max_vm_data_depth; @@ -2782,7 +3563,11 @@ td::Status FetchConfigParams::fetch_config_params( if (compute_phase_cfg->global_version >= 4) { compute_phase_cfg->prev_blocks_info = std::move(prev_blocks_info); } + if (compute_phase_cfg->global_version >= 6) { + compute_phase_cfg->unpacked_config_tuple = config.get_unpacked_config_tuple(now); + } compute_phase_cfg->suspended_addresses = config.get_suspended_addresses(now); + compute_phase_cfg->size_limits = size_limits; } { // compute action_phase_cfg diff --git a/crypto/block/transaction.h b/crypto/block/transaction.h index 3ae8cdf8..57defc8c 100644 --- a/crypto/block/transaction.h +++ b/crypto/block/transaction.h @@ -104,6 +104,8 @@ struct ComputePhaseConfig { td::uint64 gas_credit; td::uint64 flat_gas_limit = 0; td::uint64 flat_gas_price = 0; + bool special_gas_full = false; + block::GasLimitsPrices mc_gas_prices; static constexpr td::uint64 gas_infty = (1ULL << 63) - 1; td::RefInt256 gas_price256; td::RefInt256 max_gas_threshold; @@ -115,15 +117,13 @@ struct ComputePhaseConfig { td::uint16 max_vm_data_depth = 512; int global_version = 0; Ref prev_blocks_info; + Ref unpacked_config_tuple; std::unique_ptr suspended_addresses; + SizeLimitsConfig size_limits; int vm_log_verbosity = 0; + bool stop_on_accept_message = false; - ComputePhaseConfig(td::uint64 _gas_price = 0, td::uint64 _gas_limit = 0, 
td::uint64 _gas_credit = 0) - : gas_price(_gas_price), gas_limit(_gas_limit), special_gas_limit(_gas_limit), gas_credit(_gas_credit) { - compute_threshold(); - } - ComputePhaseConfig(td::uint64 _gas_price, td::uint64 _gas_limit, td::uint64 _spec_gas_limit, td::uint64 _gas_credit) - : gas_price(_gas_price), gas_limit(_gas_limit), special_gas_limit(_spec_gas_limit), gas_credit(_gas_credit) { + ComputePhaseConfig() : gas_price(0), gas_limit(0), special_gas_limit(0), gas_credit(0) { compute_threshold(); } void compute_threshold(); @@ -270,7 +270,7 @@ struct Account { return balance; } bool set_address(ton::WorkchainId wc, td::ConstBitPtr new_addr); - bool unpack(Ref account, Ref extra, ton::UnixTime now, bool special = false); + bool unpack(Ref account, ton::UnixTime now, bool special); bool init_new(ton::UnixTime now); bool deactivate(); bool recompute_tmp_addr(Ref& tmp_addr, int split_depth, td::ConstBitPtr orig_addr_rewrite) const; @@ -361,18 +361,20 @@ struct Transaction { std::unique_ptr action_phase; std::unique_ptr bounce_phase; vm::CellStorageStat new_storage_stat; + bool gas_limit_overridden{false}; Transaction(const Account& _account, int ttype, ton::LogicalTime req_start_lt, ton::UnixTime _now, Ref _inmsg = {}); bool unpack_input_msg(bool ihr_delivered, const ActionPhaseConfig* cfg); bool check_in_msg_state_hash(); bool prepare_storage_phase(const StoragePhaseConfig& cfg, bool force_collect = true, bool adjust_msg_value = false); bool prepare_credit_phase(); + td::uint64 gas_bought_for(const ComputePhaseConfig& cfg, td::RefInt256 nanograms); bool compute_gas_limits(ComputePhase& cp, const ComputePhaseConfig& cfg); Ref prepare_vm_stack(ComputePhase& cp); std::vector> compute_vm_libraries(const ComputePhaseConfig& cfg); bool prepare_compute_phase(const ComputePhaseConfig& cfg); bool prepare_action_phase(const ActionPhaseConfig& cfg); - td::Status check_state_limits(const ActionPhaseConfig& cfg); + td::Status check_state_limits(const SizeLimitsConfig& size_limits, bool update_storage_stat = true); bool prepare_bounce_phase(const ActionPhaseConfig& cfg); bool compute_state(); bool serialize(); @@ -382,9 +384,7 @@ struct Transaction { td::Result estimate_block_storage_profile_incr( const vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; - bool update_block_storage_profile(vm::NewCellStorageStat& store_stat, const vm::CellUsageTree* usage_tree) const; - bool would_fit(unsigned cls, const block::BlockLimitStatus& blk_lim_st) const; - bool update_limits(block::BlockLimitStatus& blk_lim_st, bool with_size = true) const; + bool update_limits(block::BlockLimitStatus& blk_lim_st, bool with_gas = true, bool with_size = true) const; Ref commit(Account& _account); // _account should point to the same account LtCellRef extract_out_msg(unsigned i); @@ -406,7 +406,7 @@ struct Transaction { bool serialize_compute_phase(vm::CellBuilder& cb); bool serialize_action_phase(vm::CellBuilder& cb); bool serialize_bounce_phase(vm::CellBuilder& cb); - bool unpack_msg_state(bool lib_only = false); + bool unpack_msg_state(const ComputePhaseConfig& cfg, bool lib_only = false, bool forbid_public_libs = false); }; } // namespace transaction diff --git a/crypto/common/bitstring.cpp b/crypto/common/bitstring.cpp index c10a4ff3..5135cdf0 100644 --- a/crypto/common/bitstring.cpp +++ b/crypto/common/bitstring.cpp @@ -130,7 +130,7 @@ void bits_memcpy(unsigned char* to, int to_offs, const unsigned char* from, int from_offs &= 7; to_offs &= 7; //fprintf(stderr, "bits_memcpy: from=%p (%02x) to=%p 
(%02x) from_offs=%d to_offs=%d count=%lu\n", from, *from, to, *to, from_offs, to_offs, bit_count); - int sz = (int)bit_count; + int sz = static_cast(bit_count); bit_count += from_offs; if (from_offs == to_offs) { if (bit_count < 8) { @@ -206,7 +206,7 @@ void bits_memset(unsigned char* to, int to_offs, bool val, std::size_t bit_count } to += (to_offs >> 3); to_offs &= 7; - int sz = (int)bit_count; + int sz = static_cast(bit_count); bit_count += to_offs; int c = *to; if (bit_count <= 8) { diff --git a/crypto/fift/lib/Asm.fif b/crypto/fift/lib/Asm.fif index d9cee5d8..92ceab6d 100644 --- a/crypto/fift/lib/Asm.fif +++ b/crypto/fift/lib/Asm.fif @@ -2,7 +2,7 @@ library TVM_Asm // simple TVM Assembler namespace Asm Asm definitions -"0.4.4" constant asm-fif-version +"0.4.5" constant asm-fif-version variable @atend variable @was-split @@ -461,19 +461,109 @@ x{B7A3} @Defop QNEGATE x{B7A4} @Defop QINC x{B7A5} @Defop QDEC x{B7A8} @Defop QMUL + x{B7A904} @Defop QDIV x{B7A905} @Defop QDIVR x{B7A906} @Defop QDIVC x{B7A908} @Defop QMOD +x{B7A909} @Defop QMODR +x{B7A90A} @Defop QMODC x{B7A90C} @Defop QDIVMOD x{B7A90D} @Defop QDIVMODR x{B7A90E} @Defop QDIVMODC x{B7A900} @Defop QADDDIVMOD x{B7A901} @Defop QADDDIVMODR x{B7A902} @Defop QADDDIVMODC + +x{B7A925} @Defop QRSHIFTR +x{B7A926} @Defop QRSHIFTC +x{B7A928} @Defop QMODPOW2 +x{B7A929} @Defop QMODPOW2R +x{B7A92A} @Defop QMODPOW2C +x{B7A92C} @Defop QRSHIFTMOD +x{B7A92D} @Defop QRSHIFTMODR +x{B7A92E} @Defop QRSHIFTMODC +x{B7A920} @Defop QADDRSHIFTMOD +x{B7A921} @Defop QADDRSHIFTMODR +x{B7A922} @Defop QADDRSHIFTMODC + +x{B7A935} @Defop(8u+1) QRSHIFTR# +x{B7A936} @Defop(8u+1) QRSHIFTC# +x{B7A938} @Defop(8u+1) QMODPOW2# +x{B7A939} @Defop(8u+1) QMODPOW2R# +x{B7A93A} @Defop(8u+1) QMODPOW2C# +x{B7A93C} @Defop(8u+1) QRSHIFT#MOD +x{B7A93D} @Defop(8u+1) QRSHIFTR#MOD +x{B7A93E} @Defop(8u+1) QRSHIFTC#MOD +x{B7A930} @Defop(8u+1) QADDRSHIFT#MOD +x{B7A931} @Defop(8u+1) QADDRSHIFTR#MOD +x{B7A932} @Defop(8u+1) QADDRSHIFTC#MOD + +x{B7A984} @Defop QMULDIV x{B7A985} @Defop QMULDIVR +x{B7A986} @Defop QMULDIVC +x{B7A988} @Defop QMULMOD +x{B7A989} @Defop QMULMODR +x{B7A98A} @Defop QMULMODC x{B7A98C} @Defop QMULDIVMOD -x{B7A980} @Defop QADDMULDIVMOD +x{B7A98D} @Defop QMULDIVMODR +x{B7A98E} @Defop QMULDIVMODC +x{B7A980} @Defop QMULADDDIVMOD +x{B7A981} @Defop QMULADDDIVMODR +x{B7A982} @Defop QMULADDDIVMODC + +x{B7A9A4} @Defop QMULRSHIFT +x{B7A9A5} @Defop QMULRSHIFTR +x{B7A9A6} @Defop QMULRSHIFTC +x{B7A9A8} @Defop QMULMODPOW2 +x{B7A9A9} @Defop QMULMODPOW2R +x{B7A9AA} @Defop QMULMODPOW2C +x{B7A9AC} @Defop QMULRSHIFTMOD +x{B7A9AD} @Defop QMULRSHIFTRMOD +x{B7A9AE} @Defop QMULRSHIFTCMOD +x{B7A9A0} @Defop QMULADDRSHIFTMOD +x{B7A9A1} @Defop QMULADDRSHIFTRMOD +x{B7A9A2} @Defop QMULADDRSHIFTCMOD + +x{B7A9B4} @Defop(8u+1) QMULRSHIFT# +x{B7A9B5} @Defop(8u+1) QMULRSHIFTR# +x{B7A9B6} @Defop(8u+1) QMULRSHIFTC# +x{B7A9B8} @Defop(8u+1) QMULMODPOW2# +x{B7A9B9} @Defop(8u+1) QMULMODPOW2R# +x{B7A9BA} @Defop(8u+1) QMULMODPOW2C# +x{B7A9BC} @Defop(8u+1) QMULRSHIFT#MOD +x{B7A9BD} @Defop(8u+1) QMULRSHIFTR#MOD +x{B7A9BE} @Defop(8u+1) QMULRSHIFTC#MOD +x{B7A9B0} @Defop(8u+1) QMULADDRSHIFT#MOD +x{B7A9B1} @Defop(8u+1) QMULADDRSHIFTR#MOD +x{B7A9B2} @Defop(8u+1) QMULADDRSHIFTC#MOD + +x{B7A9C4} @Defop QLSHIFTDIV +x{B7A9C5} @Defop QLSHIFTDIVR +x{B7A9C6} @Defop QLSHIFTDIVC +x{B7A9C8} @Defop QLSHIFTMOD +x{B7A9C9} @Defop QLSHIFTMODR +x{B7A9CA} @Defop QLSHIFTMODC +x{B7A9CC} @Defop QLSHIFTDIVMOD +x{B7A9CD} @Defop QLSHIFTDIVMODR +x{B7A9CE} @Defop QLSHIFTDIVMODC +x{B7A9C0} @Defop QLSHIFTADDDIVMOD +x{B7A9C1} @Defop 
QLSHIFTADDDIVMODR +x{B7A9C2} @Defop QLSHIFTADDDIVMODC + +x{B7A9D4} @Defop(8u+1) QLSHIFT#DIV +x{B7A9D5} @Defop(8u+1) QLSHIFT#DIVR +x{B7A9D6} @Defop(8u+1) QLSHIFT#DIVC +x{B7A9D8} @Defop(8u+1) QLSHIFT#MOD +x{B7A9D9} @Defop(8u+1) QLSHIFT#MODR +x{B7A9DA} @Defop(8u+1) QLSHIFT#MODC +x{B7A9DC} @Defop(8u+1) QLSHIFT#DIVMOD +x{B7A9DD} @Defop(8u+1) QLSHIFT#DIVMODR +x{B7A9DE} @Defop(8u+1) QLSHIFT#DIVMODC +x{B7A9D0} @Defop(8u+1) QLSHIFT#ADDDIVMOD +x{B7A9D1} @Defop(8u+1) QLSHIFT#ADDDIVMODR +x{B7A9D2} @Defop(8u+1) QLSHIFT#ADDDIVMODC + x{B7AC} @Defop QLSHIFT x{B7AD} @Defop QRSHIFT x{B7AE} @Defop QPOW2 @@ -723,6 +813,12 @@ x{D761} @Defop LDONES x{D762} @Defop LDSAME x{D764} @Defop SDEPTH x{D765} @Defop CDEPTH +x{D766} @Defop CLEVEL +x{D767} @Defop CLEVELMASK +{ B \"%s\" B>file "%(COMPILED_ST_FIF, COMPILED_ST_BOC)) - res = subprocess.run([FIFT_EXECUTABLE, COMPILED_BUILD_BOC ], capture_output=True, timeout=10) - - + COMPILED_BUILD_BOC = os.path.join(TMP_DIR, "build-boc.fif") + res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_ST_FIF, "-SPA", f.replace("storage-provider.fc","storage-contract.fc")], capture_output=False, timeout=10) + with open(COMPILED_BUILD_BOC, "w") as scr: + scr.write("\"%s\" include boc>B \"%s\" B>file "%(COMPILED_ST_FIF, COMPILED_ST_BOC)) + res = subprocess.run([FIFT_EXECUTABLE, COMPILED_BUILD_BOC ], capture_output=True, timeout=10) + + res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_FIF, "-SPA", f], capture_output=True, timeout=10) except Exception as e: - post_process_func(f) - raise e + post_process_func(f) + raise e else: - post_process_func(f) + post_process_func(f) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) def run_runner(): - res = subprocess.run([FIFT_EXECUTABLE, "-I", FIFT_LIBS, RUNNER_FIF], capture_output=True, timeout=10) + res = subprocess.run([FIFT_EXECUTABLE, RUNNER_FIF], capture_output=True, timeout=10) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) s = str(res.stdout, "utf-8") @@ -138,16 +137,15 @@ for ti, t in enumerate(tests): try: func_out = run_runner() if func_out != th: - raise ExecutionError("Error : expected '%d', found '%d'" % (th, func_out)) + raise ExecutionError("Error : expected '%d', found '%d'" % (th, func_out)) success += 1 except ExecutionError as e: print(e, file=sys.stderr) - #print("Compiled:", file=sys.stderr) - #with open(COMPILED_FIF, "r") as f: - # print(f.read(), file=sys.stderr) - #exit(2) + print("Compiled:", file=sys.stderr) + with open(COMPILED_FIF, "r") as f: + print(f.read(), file=sys.stderr) + exit(2) print(" OK ", file=sys.stderr) print(get_version()) -print("Done: Success %d, Error: %d"%(success, len(tests)-success), file=sys.stderr) - +print("Done: Success %d, Error: %d"%(success, len(tests)-success), file=sys.stderr) \ No newline at end of file diff --git a/crypto/func/auto-tests/run_tests.py b/crypto/func/auto-tests/run_tests.py index 0f12332d..158e871b 100644 --- a/crypto/func/auto-tests/run_tests.py +++ b/crypto/func/auto-tests/run_tests.py @@ -4,6 +4,7 @@ import subprocess import sys import tempfile + def getenv(name, default=None): if name in os.environ: return os.environ[name] @@ -12,10 +13,9 @@ def getenv(name, default=None): exit(1) return default + FUNC_EXECUTABLE = getenv("FUNC_EXECUTABLE", "func") FIFT_EXECUTABLE = getenv("FIFT_EXECUTABLE", "fift") -#FUNC_STDLIB = getenv("FUNC_STDLIB") -FIFT_LIBS = getenv("FIFT_LIBS") TMP_DIR = tempfile.mkdtemp() COMPILED_FIF = os.path.join(TMP_DIR, "compiled.fif") RUNNER_FIF = os.path.join(TMP_DIR, "runner.fif") @@ -25,22 +25,26 @@ 
if len(sys.argv) != 2: exit(1) TESTS_DIR = sys.argv[1] + class ExecutionError(Exception): pass + def compile_func(f): res = subprocess.run([FUNC_EXECUTABLE, "-o", COMPILED_FIF, "-SPA", f], capture_output=True, timeout=10) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) + def run_runner(): - res = subprocess.run([FIFT_EXECUTABLE, "-I", FIFT_LIBS, RUNNER_FIF], capture_output=True, timeout=10) + res = subprocess.run([FIFT_EXECUTABLE, RUNNER_FIF], capture_output=True, timeout=10) if res.returncode != 0: raise ExecutionError(str(res.stderr, "utf-8")) s = str(res.stdout, "utf-8") s = [x.strip() for x in s.split("\n")] return [x for x in s if x != ""] + tests = [s for s in os.listdir(TESTS_DIR) if s.endswith(".fc")] tests.sort() print("Found", len(tests), "tests", file=sys.stderr) @@ -68,18 +72,18 @@ for ti, tf in enumerate(tests): # preprocess arithmetics in input for i in range(len(cases)): - inputs = cases[i][1].split(" ") - processed_inputs = "" - for in_arg in inputs: - if "x{" in in_arg: - processed_inputs += in_arg - continue - # filter and execute - # is it safe enough? - filtered_in = "".join(filter(lambda x: x in "0x123456789()+-*/<>", in_arg)) - if(filtered_in): - processed_inputs += str(eval(filtered_in)) + " "; - cases[i][1] = processed_inputs.strip() + inputs = cases[i][1].split(" ") + processed_inputs = "" + for in_arg in inputs: + if "x{" in in_arg: + processed_inputs += in_arg + continue + # filter and execute + # is it safe enough? + filtered_in = "".join(filter(lambda x: x in "0x123456789()+-*/<>", in_arg)) + if filtered_in: + processed_inputs += str(eval(filtered_in)) + " " + cases[i][1] = processed_inputs.strip() with open(RUNNER_FIF, "w") as f: print("\"%s\" include >(std::istream& is, Bignum& x) { return is; } -bool is_prime(const Bignum& p, int nchecks, bool trial_div) { - return BN_is_prime_fasttest_ex(p.bn_ptr(), BN_prime_checks, get_ctx(), trial_div, 0); +bool is_prime(const Bignum& p) { +#if OPENSSL_VERSION_MAJOR >= 3 + int result = BN_check_prime(p.bn_ptr(), get_ctx(), nullptr); + LOG_IF(FATAL, result == -1); + return result; +#else + return BN_is_prime_fasttest_ex(p.bn_ptr(), BN_prime_checks, get_ctx(), true, 0); +#endif } } // namespace arith diff --git a/crypto/openssl/bignum.h b/crypto/openssl/bignum.h index 2a8dd8a0..032dbb02 100644 --- a/crypto/openssl/bignum.h +++ b/crypto/openssl/bignum.h @@ -335,7 +335,7 @@ const Bignum sqr(const Bignum& x); std::ostream& operator<<(std::ostream& os, const Bignum& x); std::istream& operator>>(std::istream& is, Bignum& x); -bool is_prime(const Bignum& p, int nchecks = 64, bool trial_div = true); +bool is_prime(const Bignum& p); inline int cmp(const Bignum& x, const Bignum& y) { return BN_cmp(x.bn_ptr(), y.bn_ptr()); diff --git a/crypto/parser/lexer.cpp b/crypto/parser/lexer.cpp index 624d8dd2..117f1df5 100644 --- a/crypto/parser/lexer.cpp +++ b/crypto/parser/lexer.cpp @@ -250,7 +250,6 @@ const Lexem& Lexer::next() { } if (is_multiline_quote(src.get_ptr(), src.get_end_ptr())) { src.advance(multiline_quote.size()); - const char* begin = src.get_ptr(); const char* end = nullptr; SrcLocation here = src.here(); std::string body; diff --git a/crypto/smartcont/highload-wallet-v2-code.fc b/crypto/smartcont/highload-wallet-v2-code.fc index 7dd65f9e..b7626bbe 100644 --- a/crypto/smartcont/highload-wallet-v2-code.fc +++ b/crypto/smartcont/highload-wallet-v2-code.fc @@ -3,6 +3,22 @@ ;; this version does not use seqno for replay protection; instead, it remembers all recent query_ids ;; in this way several external 
messages with different query_id can be sent in parallel + +;; Note that when dealing with the highload wallet, the following limits need to be checked and taken into account: +;; 1) Storage size limit. Currently, the size of the contract storage must stay below 65535 cells. If the size of +;; old_queries grows above this limit, an exception will be thrown in the ActionPhase and the transaction will fail. +;; A failed transaction may be replayed. +;; 2) Gas limit. Currently, the gas limit is 1'000'000 gas units, which means there is a limit on how many +;; old queries can be cleaned in one transaction. If the number of expired queries is higher, the contract will get stuck. + +;; This means it is not recommended to set the expiration date too far in the future: +;; the number of queries during the expiration timespan should not exceed 1000. +;; Also, the number of expired queries cleaned in one transaction should stay below 100. + +;; Such precautions are not easy to follow, so it is recommended to use the highload contract +;; only when strictly necessary and when the developer understands the above details. + + () recv_internal(slice in_msg) impure { ;; do nothing for internal messages } diff --git a/crypto/smc-envelope/GenericAccount.cpp b/crypto/smc-envelope/GenericAccount.cpp index 4cd6bf3f..04249699 100644 --- a/crypto/smc-envelope/GenericAccount.cpp +++ b/crypto/smc-envelope/GenericAccount.cpp @@ -155,7 +155,7 @@ td::Result GenericAccount::get_wallet_id(const SmartContract& sc) { return TRY_VM([&]() -> td::Result { auto answer = sc.run_get_method("wallet_id"); if (!answer.success) { - return td::Status::Error("seqno get method failed"); + return td::Status::Error("wallet_id get method failed"); } return static_cast(answer.stack.write().pop_long_range(std::numeric_limits::max())); }()); diff --git a/crypto/smc-envelope/ManualDns.h b/crypto/smc-envelope/ManualDns.h index b5dee59a..d24cd023 100644 --- a/crypto/smc-envelope/ManualDns.h +++ b/crypto/smc-envelope/ManualDns.h @@ -305,7 +305,7 @@ class ManualDns : public ton::SmartContract, public DnsInterface { if (!info.known_category.insert(action.category).second) { continue; } - if (action.category == 0) { + if (action.category.is_zero()) { info.closed = true; auto old_actions = std::move(info.actions); bool is_empty = true; @@ -327,7 +327,7 @@ class ManualDns : public ton::SmartContract, public DnsInterface { if (info.closed) { CombinedActions ca; ca.name = it.first; - ca.category = 0; + ca.category = td::Bits256::zero(); if (!info.actions.empty() || info.non_empty) { ca.actions = std::move(info.actions); } diff --git a/crypto/smc-envelope/SmartContract.cpp b/crypto/smc-envelope/SmartContract.cpp index 64e26944..1908b912 100644 --- a/crypto/smc-envelope/SmartContract.cpp +++ b/crypto/smc-envelope/SmartContract.cpp @@ -171,6 +171,10 @@ td::Ref prepare_vm_c7(SmartContract::Args args, td::Ref cod // prev_key_block:BlockId ] : PrevBlocksInfo tuple.push_back(args.prev_blocks_info ?
args.prev_blocks_info.value() : vm::StackEntry{}); // prev_block_info } + if (args.config && args.config.value()->get_global_version() >= 6) { + tuple.push_back(args.config.value()->get_unpacked_config_tuple(now)); // unpacked_config_tuple + tuple.push_back(td::zero_refint()); // due_payment + } auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); //LOG(DEBUG) << "SmartContractInfo initialized with " << vm::StackEntry(tuple).to_string(); return vm::make_tuple_ref(std::move(tuple_ref)); @@ -181,7 +185,7 @@ SmartContract::Answer run_smartcont(SmartContract::State state, td::Ref config) { auto gas_credit = gas.gas_credit; - vm::init_op_cp0(debug_enabled); + vm::init_vm(debug_enabled).ensure(); vm::DictionaryBase::get_empty_dictionary(); class Logger : public td::LogInterface { diff --git a/crypto/smc-envelope/SmartContractCode.cpp b/crypto/smc-envelope/SmartContractCode.cpp index d10c4b5c..585450f6 100644 --- a/crypto/smc-envelope/SmartContractCode.cpp +++ b/crypto/smc-envelope/SmartContractCode.cpp @@ -28,6 +28,7 @@ namespace { // WALLET_REVISION = 2; // WALLET2_REVISION = 2; // WALLET3_REVISION = 2; +// WALLET4_REVISION = 2; // HIGHLOAD_WALLET_REVISION = 2; // HIGHLOAD_WALLET2_REVISION = 2; // DNS_REVISION = 1; @@ -92,6 +93,20 @@ const auto& get_map() { "AAXrc52omhpn5jrhf/AABesePaiaGmPmOuFj8ABDbbYHwR7Z5AOAQm1B1tnkA4BTu1E0IEBQNch0x/" "0BNEC2zz4J28QAoAg9HtvpTGX+gAwoXC2CZEw4g8AOiGOETGA8/gzIG6SMHCU0NcLH+IB3yGSAaGSW3/iAAzTB9QC+wAAHssfFMsfEsv/yx/" "0AMntVA=="); + with_tvm_code( + "wallet-v4-r2", + "te6cckECFAEAAtQAART/APSkE/S88sgLAQIBIAIDAgFIBAUE+PKDCNcYINMf0x/THwL4I7vyZO1E0NMf0x/T//" + "QE0VFDuvKhUVG68qIF+QFUEGT5EPKj+AAkpMjLH1JAyx9SMMv/" + "UhD0AMntVPgPAdMHIcAAn2xRkyDXSpbTB9QC+wDoMOAhwAHjACHAAuMAAcADkTDjDQOkyMsfEssfy/" + "8QERITAubQAdDTAyFxsJJfBOAi10nBIJJfBOAC0x8hghBwbHVnvSKCEGRzdHK9sJJfBeAD+kAwIPpEAcjKB8v/" + "ydDtRNCBAUDXIfQEMFyBAQj0Cm+hMbOSXwfgBdM/" + "yCWCEHBsdWe6kjgw4w0DghBkc3RyupJfBuMNBgcCASAICQB4AfoA9AQw+CdvIjBQCqEhvvLgUIIQcGx1Z4MesXCAGFAEywUmzxZY+" + "gIZ9ADLaRfLH1Jgyz8gyYBA+wAGAIpQBIEBCPRZMO1E0IEBQNcgyAHPFvQAye1UAXKwjiOCEGRzdHKDHrFwgBhQBcsFUAPPFiP6AhPLassfyz/" + "JgED7AJJfA+ICASAKCwBZvSQrb2omhAgKBrkPoCGEcNQICEekk30pkQzmkD6f+YN4EoAbeBAUiYcVnzGEAgFYDA0AEbjJftRNDXCx+" + "AA9sp37UTQgQFA1yH0BDACyMoHy//J0AGBAQj0Cm+hMYAIBIA4PABmtznaiaEAga5Drhf/AABmvHfaiaEAQa5DrhY/AAG7SB/" + "oA1NQi+QAFyMoHFcv/ydB3dIAYyMsFywIizxZQBfoCFMtrEszMyXP7AMhAFIEBCPRR8qcCAHCBAQjXGPoA0z/" + "IVCBHgQEI9FHyp4IQbm90ZXB0gBjIywXLAlAGzxZQBPoCFMtqEssfyz/Jc/sAAgBsgQEI1xj6ANM/" + "MFIkgQEI9Fnyp4IQZHN0cnB0gBjIywXLAlAFzxZQA/oCE8tqyx8Syz/Jc/sAAAr0AMntVGliJeU="); return map; }(); return map; @@ -137,9 +152,12 @@ td::Span SmartContractCode::get_revisions(Type type) { static int res[] = {1}; return res; } + case Type::WalletV4: { + static int res[] = {2}; + return res; + } } UNREACHABLE(); - return {}; } td::Result SmartContractCode::validate_revision(Type type, int revision) { @@ -179,9 +197,10 @@ td::Ref SmartContractCode::get_code(Type type, int ext_revision) { return "payment-channel"; case Type::RestrictedWallet: return "restricted-wallet3"; + case Type::WalletV4: + return "wallet-v4"; } UNREACHABLE(); - return ""; }(type); if (revision == -1) { return load(basename).move_as_ok(); diff --git a/crypto/smc-envelope/SmartContractCode.h b/crypto/smc-envelope/SmartContractCode.h index 85be3531..be50d2a1 100644 --- a/crypto/smc-envelope/SmartContractCode.h +++ b/crypto/smc-envelope/SmartContractCode.h @@ -26,7 +26,16 @@ class SmartContractCode { public: static td::Result> load(td::Slice name); - enum Type { WalletV3 = 4, 
HighloadWalletV1, HighloadWalletV2, ManualDns, Multisig, PaymentChannel, RestrictedWallet }; + enum Type { + WalletV3 = 4, + HighloadWalletV1, + HighloadWalletV2, + ManualDns, + Multisig, + PaymentChannel, + RestrictedWallet, + WalletV4 + }; static td::Span get_revisions(Type type); static td::Result validate_revision(Type type, int revision); static td::Ref get_code(Type type, int revision = 0); diff --git a/crypto/smc-envelope/WalletV4.cpp b/crypto/smc-envelope/WalletV4.cpp new file mode 100644 index 00000000..738fa9c7 --- /dev/null +++ b/crypto/smc-envelope/WalletV4.cpp @@ -0,0 +1,71 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ +#include "WalletV4.h" +#include "GenericAccount.h" +#include "SmartContractCode.h" + +#include "vm/boc.h" +#include "vm/cells/CellString.h" +#include "td/utils/base64.h" + +#include + +namespace ton { +td::Result> WalletV4::make_a_gift_message(const td::Ed25519::PrivateKey& private_key, + td::uint32 valid_until, td::Span gifts) const { + CHECK(gifts.size() <= get_max_gifts_size()); + TRY_RESULT(seqno, get_seqno()); + TRY_RESULT(wallet_id, get_wallet_id()); + vm::CellBuilder cb; + cb.store_long(wallet_id, 32).store_long(valid_until, 32).store_long(seqno, 32); + cb.store_long(0, 8); // The only difference with wallet-v3 + + for (auto& gift : gifts) { + td::int32 send_mode = 3; + if (gift.gramms == -1) { + send_mode += 128; + } + if (gift.send_mode > -1) { + send_mode = gift.send_mode; + } + cb.store_long(send_mode, 8).store_ref(create_int_message(gift)); + } + + auto message_outer = cb.finalize(); + auto signature = private_key.sign(message_outer->get_hash().as_slice()).move_as_ok(); + return vm::CellBuilder().store_bytes(signature).append_cellslice(vm::load_cell_slice(message_outer)).finalize(); +} + +td::Ref WalletV4::get_init_data(const InitData& init_data) noexcept { + return vm::CellBuilder() + .store_long(init_data.seqno, 32) + .store_long(init_data.wallet_id, 32) + .store_bytes(init_data.public_key) + .store_zeroes(1) // plugins dict + .finalize(); +} + +td::Result WalletV4::get_wallet_id() const { + return TRY_VM([&]() -> td::Result { + auto answer = run_get_method("get_subwallet_id"); + if (!answer.success) { + return td::Status::Error("get_subwallet_id get method failed"); + } + return static_cast(answer.stack.write().pop_long_range(std::numeric_limits::max())); + }()); +} +} // namespace ton diff --git a/crypto/smc-envelope/WalletV4.h b/crypto/smc-envelope/WalletV4.h new file mode 100644 index 00000000..721e8103 --- /dev/null +++ b/crypto/smc-envelope/WalletV4.h @@ -0,0 +1,46 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. 
+ + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . +*/ +#pragma once + +#include "smc-envelope/SmartContract.h" +#include "smc-envelope/WalletInterface.h" +#include "vm/cells.h" +#include "Ed25519.h" +#include "block/block.h" +#include "vm/cells/CellString.h" + +namespace ton { + +struct WalletV4Traits { + using InitData = WalletInterface::DefaultInitData; + + static constexpr unsigned max_message_size = vm::CellString::max_bytes; + static constexpr unsigned max_gifts_size = 4; + static constexpr auto code_type = SmartContractCode::WalletV4; +}; + +class WalletV4 : public WalletBase { + public: + explicit WalletV4(State state) : WalletBase(std::move(state)) { + } + td::Result> make_a_gift_message(const td::Ed25519::PrivateKey& private_key, td::uint32 valid_until, + td::Span gifts) const override; + static td::Ref get_init_data(const InitData& init_data) noexcept; + + td::Result get_wallet_id() const override; +}; +} // namespace ton \ No newline at end of file diff --git a/crypto/test/fift.cpp b/crypto/test/fift.cpp index 049507fd..3f370ee5 100644 --- a/crypto/test/fift.cpp +++ b/crypto/test/fift.cpp @@ -33,7 +33,7 @@ std::string load_test(std::string name) { return td::read_file_str(current_dir() + "fift/" + name).move_as_ok(); } -td::Status run_fift(std::string name, bool expect_error = false, bool preload_fift = true) { +td::Status run_fift(std::string name, bool expect_error = false) { auto res = fift::mem_run_fift(load_test(name)); if (expect_error) { res.ensure_error(); @@ -163,3 +163,7 @@ TEST(Fift, test_bls) { TEST(Fift, test_bls_ops) { run_fift("bls_ops.fif"); } + +TEST(Fift, test_levels) { + run_fift("levels.fif"); +} diff --git a/crypto/test/fift/levels.fif b/crypto/test/fift/levels.fif new file mode 100644 index 00000000..a7d450b6 --- /dev/null +++ b/crypto/test/fift/levels.fif @@ -0,0 +1,75 @@ +"Asm.fif" include +"FiftExt.fif" include + +{ + dup + ."Cell " .dump cr + dup [[ <{ CLEVEL }>s ]] 0 runvmx abort"exitcode != 0" ."Level = " . cr + dup [[ <{ CLEVELMASK }>s ]] 0 runvmx abort"exitcode != 0" ."Level mask = 0b" b. cr + dup dup [[ <{ 0 CHASHI DUP ROT 0 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_0 = " X. cr + dup dup [[ <{ 1 CHASHI DUP ROT 1 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_1 = " X. cr + dup dup [[ <{ 2 CHASHI DUP ROT 2 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_2 = " X. cr + dup dup [[ <{ 3 CHASHI DUP ROT 3 INT CHASHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Hash_3 = " X. cr + dup dup [[ <{ 0 CDEPTHI DUP ROT 0 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_0 = " . cr + dup dup [[ <{ 1 CDEPTHI DUP ROT 1 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_1 = " . cr + dup dup [[ <{ 2 CDEPTHI DUP ROT 2 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_2 = " . cr + dup dup [[ <{ 3 CDEPTHI DUP ROT 3 INT CDEPTHIX EQUAL 55 THROWIFNOT }>s ]] 0 runvmx abort"exitcode != 0" ."Depth_3 = " . 
cr + drop + cr +} : print-all + +// Ordinary cell of level 0 + ref, b> ref, + ref, +b> +print-all + +// Prunned branch of level 1 +spec +print-all + +// Prunned branch of level 3 +spec +print-all + +// Prunned branch of level 3, mask 0b101 +spec +print-all + +// Tree with the previous cell inside +spec ref, + b> ref, +b> +print-all diff --git a/crypto/test/fift/rist255.fif b/crypto/test/fift/rist255.fif index 630eebb1..ddd9b619 100644 --- a/crypto/test/fift/rist255.fif +++ b/crypto/test/fift/rist255.fif @@ -9,6 +9,9 @@ [[ <{ RIST255_MULBASE DUP RIST255_VALIDATE }>s ]] 0 runvmx abort"Exitcode != 0" @' n . dup (x.) type cr @' ans <> abort"Invalid result" + @' n + [[ <{ 1 INT RIST255_MULBASE SWAP RIST255_MUL DUP RIST255_VALIDATE }>s ]] 0 runvmx abort"Exitcode != 0" + @' ans <> abort"Invalid result" } : test-basepoint 0 0x0000000000000000000000000000000000000000000000000000000000000000 test-basepoint diff --git a/crypto/test/modbigint.cpp b/crypto/test/modbigint.cpp index b34411f3..75051fa6 100644 --- a/crypto/test/modbigint.cpp +++ b/crypto/test/modbigint.cpp @@ -180,7 +180,7 @@ struct MixedRadix { template const MixedRadix& as_shorter() const { - static_assert(M <= N); + static_assert(M <= N,"error"); return *reinterpret_cast*>(this); } @@ -458,7 +458,7 @@ struct ModArray { } template ModArray(const ModArray& other) { - static_assert(M >= N); + static_assert(M >= N,"error"); std::copy(other.a, other.a + N, a); } ModArray(const int* p) : a(p) { @@ -819,7 +819,7 @@ struct ModArray { template const ModArray& as_shorter() const { - static_assert(M <= N); + static_assert(M <= N,"error"); return *reinterpret_cast*>(this); } diff --git a/crypto/test/test-bigint.cpp b/crypto/test/test-bigint.cpp index 7525c83a..a6f6e8d6 100644 --- a/crypto/test/test-bigint.cpp +++ b/crypto/test/test-bigint.cpp @@ -16,12 +16,12 @@ */ #include #include -#include #include #include #include #include #include +#include #include "common/refcnt.hpp" #include "common/bigint.hpp" #include "common/refint.h" @@ -211,7 +211,7 @@ bool coin() { // returns 0 with probability 1/2, 1 with prob. 1/4, ..., k with prob. 1/2^(k+1) int randexp(int max = 63, int min = 0) { - return min + __builtin_clzll(Random() | (1ULL << (63 - max + min))); + return min + td::count_leading_zeroes64(Random() | (1ULL << (63 - max + min))); } void bin_add_small(unsigned char bin[64], long long val, int shift = 0) { @@ -363,7 +363,7 @@ void check_one_int_repr(td::RefInt256 x, int mode, int in_range, const BInt* val if (is_small) { // special check for small (64-bit) values CHECK(x->to_long() == xval); - CHECK((long long)__builtin_bswap64(*(long long*)(bytes + 64 - 8)) == xval); + CHECK((long long)td::bswap64(*(long long*)(bytes + 64 - 8)) == xval); CHECK(in_range); // check sign CHECK(x->sgn() == (xval > 0 ? 1 : (xval < 0 ? 
-1 : 0))); diff --git a/crypto/test/test-db.cpp b/crypto/test/test-db.cpp index 413d774f..35727ee3 100644 --- a/crypto/test/test-db.cpp +++ b/crypto/test/test-db.cpp @@ -127,12 +127,12 @@ class BenchSha256Low : public td::Benchmark { void run(int n) override { int res = 0; - SHA256_CTX ctx; + td::Sha256State ctx; for (int i = 0; i < n; i++) { - SHA256_Init(&ctx); - SHA256_Update(&ctx, "abcd", 4); + ctx.init(); + ctx.feed("abcd"); unsigned char buf[32]; - SHA256_Final(buf, &ctx); + ctx.extract(td::MutableSlice{buf, 32}); res += buf[0]; } td::do_not_optimize_away(res); diff --git a/crypto/test/test-smartcont.cpp b/crypto/test/test-smartcont.cpp index 673bb758..7f512cea 100644 --- a/crypto/test/test-smartcont.cpp +++ b/crypto/test/test-smartcont.cpp @@ -35,6 +35,7 @@ #include "smc-envelope/SmartContract.h" #include "smc-envelope/SmartContractCode.h" #include "smc-envelope/WalletV3.h" +#include "smc-envelope/WalletV4.h" #include "smc-envelope/HighloadWallet.h" #include "smc-envelope/HighloadWalletV2.h" #include "smc-envelope/PaymentChannel.h" @@ -488,7 +489,7 @@ void do_test_wallet(int revision) { auto address = std::move(res.address); auto iwallet = std::move(res.wallet); auto public_key = priv_key.get_public_key().move_as_ok().as_octet_string(); - ; + check_wallet_state(iwallet, 1, 123, public_key); // lets send a lot of messages @@ -526,6 +527,7 @@ void do_test_wallet() { TEST(Tonlib, Wallet) { do_test_wallet(); + do_test_wallet(); do_test_wallet(); do_test_wallet(); do_test_wallet(); @@ -956,7 +958,7 @@ class MapDns { } return; } - if (!actions.category.is_zero()) { + if (actions.category.is_zero()) { entries_.erase(actions.name); LOG(ERROR) << "CLEAR " << actions.name; if (!actions.actions) { @@ -1001,7 +1003,7 @@ class CheckedDns { explicit CheckedDns(bool check_smc = true, bool check_combine = true) { if (check_smc) { key_ = td::Ed25519::generate_private_key().move_as_ok(); - dns_ = ManualDns::create(ManualDns::create_init_data_fast(key_.value().get_public_key().move_as_ok(), 123)); + dns_ = ManualDns::create(ManualDns::create_init_data_fast(key_.value().get_public_key().move_as_ok(), 123), -1); } if (check_combine) { combined_map_dns_ = MapDns(); @@ -1024,7 +1026,7 @@ class CheckedDns { } return action; }); - auto query = dns_->create_update_query(key_.value(), smc_actions).move_as_ok(); + auto query = dns_->create_update_query(key_.value(), smc_actions, query_id_++).move_as_ok(); CHECK(dns_.write().send_external_message(std::move(query)).code == 0); } map_dns_.update(entries); @@ -1079,6 +1081,7 @@ class CheckedDns { using ManualDns = ton::ManualDns; td::optional key_; td::Ref dns_; + td::uint32 query_id_ = 1; // Query id serve as "valid until", but in tests now() == 0 MapDns map_dns_; td::optional combined_map_dns_; @@ -1092,9 +1095,10 @@ class CheckedDns { } }; -static td::Bits256 intToCat(int x) { - td::Bits256 cat = td::Bits256::zero(); - cat.as_slice().copy_from(td::Slice((char*)&x, sizeof(x))); +static td::Bits256 intToCat(td::uint32 x) { + auto y = td::make_refint(x); + td::Bits256 cat; + y->export_bytes(cat.data(), 32, false); return cat; } @@ -1180,7 +1184,7 @@ TEST(Smartcont, DnsManual) { auto key = td::Ed25519::generate_private_key().move_as_ok(); - auto manual = ManualDns::create(ManualDns::create_init_data_fast(key.get_public_key().move_as_ok(), 123)); + auto manual = ManualDns::create(ManualDns::create_init_data_fast(key.get_public_key().move_as_ok(), 123), -1); CHECK(manual->get_wallet_id().move_as_ok() == 123); auto init_query = 
manual->create_init_query(key).move_as_ok(); LOG(ERROR) << "A"; diff --git a/crypto/test/vm.cpp b/crypto/test/vm.cpp index 3227f8fa..0f1b0442 100644 --- a/crypto/test/vm.cpp +++ b/crypto/test/vm.cpp @@ -28,7 +28,7 @@ #include "td/utils/StringBuilder.h" std::string run_vm(td::Ref cell) { - vm::init_op_cp0(); + vm::init_vm().ensure(); vm::DictionaryBase::get_empty_dictionary(); class Logger : public td::LogInterface { diff --git a/crypto/tl/tlbc-gen-cpp.cpp b/crypto/tl/tlbc-gen-cpp.cpp index dedec15d..6edd0a12 100644 --- a/crypto/tl/tlbc-gen-cpp.cpp +++ b/crypto/tl/tlbc-gen-cpp.cpp @@ -159,7 +159,6 @@ std::string CppIdentSet::compute_cpp_ident(std::string orig_ident, int count) { } if (!cnt) { os << '_'; - prev_skip = true; } if (count) { os << count; diff --git a/crypto/tl/tlbc.cpp b/crypto/tl/tlbc.cpp index 409ac538..b48bc472 100644 --- a/crypto/tl/tlbc.cpp +++ b/crypto/tl/tlbc.cpp @@ -2252,11 +2252,9 @@ TypeExpr* parse_expr10(Lexer& lex, Constructor& cs, int mode) { } if (op == '>') { std::swap(expr, expr2); - op = '<'; op_name = Less_name; } else if (op == src::_Geq) { std::swap(expr, expr2); - op = src::_Leq; op_name = Leq_name; } auto sym_def = sym::lookup_symbol(op_name, 2); diff --git a/crypto/vm/arithops.cpp b/crypto/vm/arithops.cpp index 7da53567..1d3111b2 100644 --- a/crypto/vm/arithops.cpp +++ b/crypto/vm/arithops.cpp @@ -285,8 +285,11 @@ int exec_divmod(VmState* st, unsigned args, int quiet) { typename td::BigInt256::DoubleInt tmp{*x}, quot; tmp += *w; tmp.mod_div(*y, quot, round_mode); - stack.push_int_quiet(td::make_refint(quot), quiet); - stack.push_int_quiet(td::make_refint(tmp), quiet); + auto q = td::make_refint(quot), r = td::make_refint(tmp); + q.write().normalize(); + r.write().normalize(); + stack.push_int_quiet(std::move(q), quiet); + stack.push_int_quiet(std::move(r), quiet); } else { switch (d) { case 1: @@ -399,6 +402,7 @@ std::string dump_shrmod(CellSlice&, unsigned args, int mode) { if (mode & 1) { os << 'Q'; } + std::string end; switch (args & 12) { case 4: os << "RSHIFT"; @@ -407,17 +411,22 @@ std::string dump_shrmod(CellSlice&, unsigned args, int mode) { os << "MODPOW2"; break; case 12: - os << "RSHIFTMOD"; + os << "RSHIFT"; + end = "MOD"; break; case 0: - os << "ADDRSHIFTMOD"; + os << "ADDRSHIFT"; + end = "MOD"; break; } + if (!(mode & 2)) { + os << end; + } if (round_mode) { os << "FRC"[round_mode]; } if (mode & 2) { - os << ' ' << y; + os << "#" << end << ' ' << y; } return os.str(); } @@ -519,7 +528,7 @@ int exec_mulshrmod(VmState* st, unsigned args, int mode) { if (add) { tmp = *w; } - tmp.add_mul(*x, *y); + tmp.add_mul(*x, *y).normalize(); switch (d) { case 1: tmp.rshift(z, round_mode).normalize(); @@ -553,6 +562,7 @@ std::string dump_mulshrmod(CellSlice&, unsigned args, int mode) { if (mode & 1) { os << 'Q'; } + std::string end; switch (args & 12) { case 4: os << "MULRSHIFT"; @@ -561,15 +571,21 @@ std::string dump_mulshrmod(CellSlice&, unsigned args, int mode) { os << "MULMODPOW2"; break; case 12: - os << "MULRSHIFTMOD"; + os << "MULRSHIFT"; + end = "MOD"; break; case 0: - os << "MULADDRSHIFTMOD"; + os << "MULADDRSHIFT"; + end = "MOD"; break; } if (round_mode) { os << "FRC"[round_mode]; } + if (mode & 2) { + os << "#"; + } + os << end; if (mode & 2) { os << ' ' << y; } @@ -644,18 +660,22 @@ std::string dump_shldivmod(CellSlice&, unsigned args, int mode) { if (mode & 1) { os << "Q"; } + os << "LSHIFT"; + if (mode & 2) { + os << "#"; + } switch (args & 12) { case 4: - os << "LSHIFTDIV"; + os << "DIV"; break; case 8: - os << "LSHIFTMOD"; + os << "MOD"; 
break; case 12: - os << "LSHIFTDIVMOD"; + os << "DIVMOD"; break; case 0: - os << "LSHIFTADDDIVMOD"; + os << "ADDDIVMOD"; break; } if (round_mode) { diff --git a/crypto/vm/boc.cpp b/crypto/vm/boc.cpp index 0d84c118..3e15f62b 100644 --- a/crypto/vm/boc.cpp +++ b/crypto/vm/boc.cpp @@ -930,7 +930,7 @@ unsigned long long BagOfCells::get_idx_entry_raw(int index) { * */ -td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty) { +td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty, bool allow_nonzero_level) { if (data.empty() && can_be_empty) { return Ref(); } @@ -946,7 +946,7 @@ td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty) { if (root.is_null()) { return td::Status::Error("bag of cells has null root cell (?)"); } - if (root->get_level() != 0) { + if (!allow_nonzero_level && root->get_level() != 0) { return td::Status::Error("bag of cells has a root with non-zero level"); } return std::move(root); diff --git a/crypto/vm/boc.h b/crypto/vm/boc.h index d6a7f9ad..df12816d 100644 --- a/crypto/vm/boc.h +++ b/crypto/vm/boc.h @@ -53,6 +53,7 @@ class NewCellStorageStat { bool operator==(const Stat& other) const { return key() == other.key(); } + Stat(const Stat& other) = default; Stat& operator=(const Stat& other) = default; Stat& operator+=(const Stat& other) { cells += other.cells; @@ -336,7 +337,7 @@ class BagOfCells { std::vector* cell_should_cache); }; -td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty = false); +td::Result> std_boc_deserialize(td::Slice data, bool can_be_empty = false, bool allow_nonzero_level = false); td::Result std_boc_serialize(Ref root, int mode = 0); td::Result>> std_boc_deserialize_multi(td::Slice data, diff --git a/crypto/vm/cellops.cpp b/crypto/vm/cellops.cpp index 9e10e072..94407231 100644 --- a/crypto/vm/cellops.cpp +++ b/crypto/vm/cellops.cpp @@ -892,6 +892,40 @@ int exec_load_special_cell(VmState* st, bool quiet) { Stack& stack = st->get_stack(); VM_LOG(st) << "execute XLOAD" << (quiet ? 
"Q" : ""); auto cell = stack.pop_cell(); + if (st->get_global_version() >= 5) { + st->register_cell_load(cell->get_hash()); + auto r_loaded_cell = cell->load_cell(); + if (r_loaded_cell.is_error()) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "failed to load cell"}; + } + } + auto loaded_cell = r_loaded_cell.move_as_ok(); + if (loaded_cell.data_cell->is_special()) { + if (loaded_cell.data_cell->special_type() != CellTraits::SpecialType::Library) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "unexpected special cell"}; + } + } + CellSlice cs(std::move(loaded_cell)); + DCHECK(cs.size() == Cell::hash_bits + 8); + cell = st->load_library(cs.data_bits() + 8); + if (cell.is_null()) { + if (quiet) { + stack.push_bool(false); + return 0; + } else { + throw VmError{Excno::cell_und, "failed to load library cell"}; + } + } + } + } stack.push_cell(cell); if (quiet) { stack.push_bool(true); @@ -1357,6 +1391,55 @@ int exec_slice_depth(VmState* st) { return 0; } +int exec_cell_level(VmState* st) { + Stack& stack = st->get_stack(); + VM_LOG(st) << "execute CLEVEL"; + auto cell = stack.pop_cell(); + stack.push_smallint(cell->get_level()); + return 0; +} + +int exec_cell_level_mask(VmState* st) { + Stack& stack = st->get_stack(); + VM_LOG(st) << "execute CLEVELMASK"; + auto cell = stack.pop_cell(); + stack.push_smallint(cell->get_level_mask().get_mask()); + return 0; +} + +int exec_cell_hash_i(VmState* st, unsigned args, bool var) { + unsigned i; + Stack& stack = st->get_stack(); + if (var) { + VM_LOG(st) << "execute CHASHIX"; + i = stack.pop_smallint_range(3); + } else { + i = args & 3; + VM_LOG(st) << "execute CHASHI " << i; + } + auto cell = stack.pop_cell(); + std::array hash = cell->get_hash(i).as_array(); + td::RefInt256 res{true}; + CHECK(res.write().import_bytes(hash.data(), hash.size(), false)); + stack.push_int(std::move(res)); + return 0; +} + +int exec_cell_depth_i(VmState* st, unsigned args, bool var) { + unsigned i; + Stack& stack = st->get_stack(); + if (var) { + VM_LOG(st) << "execute CDEPTHIX"; + i = stack.pop_smallint_range(3); + } else { + i = args & 3; + VM_LOG(st) << "execute CDEPTHI " << i; + } + auto cell = stack.pop_cell(); + stack.push_smallint(cell->get_depth(i)); + return 0; +} + void register_cell_deserialize_ops(OpcodeTable& cp0) { using namespace std::placeholders; cp0.insert(OpcodeInstr::mksimple(0xd0, 8, "CTOS", exec_cell_to_slice)) @@ -1445,7 +1528,13 @@ void register_cell_deserialize_ops(OpcodeTable& cp0) { .insert(OpcodeInstr::mksimple(0xd761, 16, "LDONES", std::bind(exec_load_same, _1, "LDONES", 1))) .insert(OpcodeInstr::mksimple(0xd762, 16, "LDSAME", std::bind(exec_load_same, _1, "LDSAME", -1))) .insert(OpcodeInstr::mksimple(0xd764, 16, "SDEPTH", exec_slice_depth)) - .insert(OpcodeInstr::mksimple(0xd765, 16, "CDEPTH", exec_cell_depth)); + .insert(OpcodeInstr::mksimple(0xd765, 16, "CDEPTH", exec_cell_depth)) + .insert(OpcodeInstr::mksimple(0xd766, 16, "CLEVEL", exec_cell_level)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xd767, 16, "CLEVELMASK", exec_cell_level_mask)->require_version(6)) + .insert(OpcodeInstr::mkfixed(0xd768 >> 2, 14, 2, instr::dump_1c_and(3, "CHASHI "), std::bind(exec_cell_hash_i, _1, _2, false))->require_version(6)) + .insert(OpcodeInstr::mkfixed(0xd76c >> 2, 14, 2, instr::dump_1c_and(3, "CDEPTHI "), std::bind(exec_cell_depth_i, _1, _2, false))->require_version(6)) + .insert(OpcodeInstr::mksimple(0xd770, 16, "CHASHIX ", 
std::bind(exec_cell_hash_i, _1, 0, true))->require_version(6)) + .insert(OpcodeInstr::mksimple(0xd771, 16, "CDEPTHIX ", std::bind(exec_cell_depth_i, _1, 0, true))->require_version(6)); } void register_cell_ops(OpcodeTable& cp0) { diff --git a/crypto/vm/cells/CellSlice.cpp b/crypto/vm/cells/CellSlice.cpp index e1df5759..ee5f6941 100644 --- a/crypto/vm/cells/CellSlice.cpp +++ b/crypto/vm/cells/CellSlice.cpp @@ -1056,9 +1056,10 @@ std::ostream& operator<<(std::ostream& os, Ref cs_ref) { // If can_be_special is not null, then it is allowed to load special cell // Flag whether loaded cell is actually special will be stored into can_be_special VirtualCell::LoadedCell load_cell_slice_impl(Ref cell, bool* can_be_special) { + auto* vm_state_interface = VmStateInterface::get(); + bool library_loaded = false; while (true) { - auto* vm_state_interface = VmStateInterface::get(); - if (vm_state_interface) { + if (vm_state_interface && !library_loaded) { vm_state_interface->register_cell_load(cell->get_hash()); } auto r_loaded_cell = cell->load_cell(); @@ -1077,6 +1078,12 @@ VirtualCell::LoadedCell load_cell_slice_impl(Ref cell, bool* can_be_specia } else if (loaded_cell.data_cell->is_special()) { if (loaded_cell.data_cell->special_type() == DataCell::SpecialType::Library) { if (vm_state_interface) { + if (vm_state_interface->get_global_version() >= 5) { + if (library_loaded) { + throw VmError{Excno::cell_und, "failed to load library cell: recursive library cells are not allowed"}; + } + library_loaded = true; + } CellSlice cs(std::move(loaded_cell)); DCHECK(cs.size() == Cell::hash_bits + 8); auto library_cell = vm_state_interface->load_library(cs.data_bits() + 8); diff --git a/crypto/vm/db/CellStorage.cpp b/crypto/vm/db/CellStorage.cpp index a1b7365b..303d4650 100644 --- a/crypto/vm/db/CellStorage.cpp +++ b/crypto/vm/db/CellStorage.cpp @@ -27,16 +27,25 @@ namespace vm { namespace { class RefcntCellStorer { public: - RefcntCellStorer(td::int32 refcnt, const DataCell &cell) : refcnt_(refcnt), cell_(cell) { + RefcntCellStorer(td::int32 refcnt, const td::Ref &cell, bool as_boc) + : refcnt_(refcnt), cell_(cell), as_boc_(as_boc) { } template void store(StorerT &storer) const { using td::store; + if (as_boc_) { + td::int32 tag = -1; + store(tag, storer); + store(refcnt_, storer); + td::BufferSlice data = vm::std_boc_serialize(cell_).move_as_ok(); + storer.store_slice(data); + return; + } store(refcnt_, storer); - store(cell_, storer); - for (unsigned i = 0; i < cell_.size_refs(); i++) { - auto cell = cell_.get_ref(i); + store(*cell_, storer); + for (unsigned i = 0; i < cell_->size_refs(); i++) { + auto cell = cell_->get_ref(i); auto level_mask = cell->get_level_mask(); auto level = level_mask.get_level(); td::uint8 x = static_cast(level_mask.get_mask()); @@ -60,7 +69,8 @@ class RefcntCellStorer { private: td::int32 refcnt_; - const DataCell &cell_; + td::Ref cell_; + bool as_boc_; }; class RefcntCellParser { @@ -69,11 +79,17 @@ class RefcntCellParser { } td::int32 refcnt; Ref cell; + bool stored_boc_; template void parse(ParserT &parser, ExtCellCreator &ext_cell_creator) { using ::td::parse; parse(refcnt, parser); + stored_boc_ = false; + if (refcnt == -1) { + stored_boc_ = true; + parse(refcnt, parser); + } if (!need_data_) { return; } @@ -81,6 +97,12 @@ class RefcntCellParser { TRY_STATUS(parser.get_status()); auto size = parser.get_left_len(); td::Slice data = parser.template fetch_string_raw(size); + if (stored_boc_) { + TRY_RESULT(boc, vm::std_boc_deserialize(data, false, true)); + TRY_RESULT(loaded_cell, 
boc->load_cell()); + cell = std::move(loaded_cell.data_cell); + return td::Status::OK(); + } CellSerializationInfo info; auto cell_data = data; TRY_STATUS(info.init(cell_data, 0 /*ref_byte_size*/)); @@ -122,7 +144,8 @@ class RefcntCellParser { }; } // namespace -CellLoader::CellLoader(std::shared_ptr reader) : reader_(std::move(reader)) { +CellLoader::CellLoader(std::shared_ptr reader, std::function on_load_callback) + : reader_(std::move(reader)), on_load_callback_(std::move(on_load_callback)) { CHECK(reader_); } @@ -145,7 +168,11 @@ td::Result CellLoader::load(td::Slice hash, bool need_da res.refcnt_ = refcnt_cell.refcnt; res.cell_ = std::move(refcnt_cell.cell); + res.stored_boc_ = refcnt_cell.stored_boc_; //CHECK(res.cell_->get_hash() == hash); + if (on_load_callback_) { + on_load_callback_(res); + } return res; } @@ -157,7 +184,7 @@ td::Status CellStorer::erase(td::Slice hash) { return kv_.erase(hash); } -td::Status CellStorer::set(td::int32 refcnt, const DataCell &cell) { - return kv_.set(cell.get_hash().as_slice(), td::serialize(RefcntCellStorer(refcnt, cell))); +td::Status CellStorer::set(td::int32 refcnt, const td::Ref &cell, bool as_boc) { + return kv_.set(cell->get_hash().as_slice(), td::serialize(RefcntCellStorer(refcnt, cell, as_boc))); } } // namespace vm diff --git a/crypto/vm/db/CellStorage.h b/crypto/vm/db/CellStorage.h index b705b531..3106ee16 100644 --- a/crypto/vm/db/CellStorage.h +++ b/crypto/vm/db/CellStorage.h @@ -45,19 +45,21 @@ class CellLoader { Ref cell_; td::int32 refcnt_{0}; + bool stored_boc_{false}; }; - CellLoader(std::shared_ptr reader); + CellLoader(std::shared_ptr reader, std::function on_load_callback = {}); td::Result load(td::Slice hash, bool need_data, ExtCellCreator &ext_cell_creator); private: std::shared_ptr reader_; + std::function on_load_callback_; }; class CellStorer { public: CellStorer(KeyValue &kv); td::Status erase(td::Slice hash); - td::Status set(td::int32 refcnt, const DataCell &cell); + td::Status set(td::int32 refcnt, const td::Ref &cell, bool as_boc); private: KeyValue &kv_; diff --git a/crypto/vm/db/DynamicBagOfCellsDb.cpp b/crypto/vm/db/DynamicBagOfCellsDb.cpp index 0d1d099f..1aa4e0f5 100644 --- a/crypto/vm/db/DynamicBagOfCellsDb.cpp +++ b/crypto/vm/db/DynamicBagOfCellsDb.cpp @@ -210,6 +210,14 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat return td::Status::OK(); } + void set_celldb_compress_depth(td::uint32 value) override { + celldb_compress_depth_ = value; + } + + vm::ExtCellCreator& as_ext_cell_creator() override { + return *this; + } + private: std::unique_ptr loader_; std::vector> to_inc_; @@ -217,6 +225,7 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat CellHashTable hash_table_; std::vector visited_; Stats stats_diff_; + td::uint32 celldb_compress_depth_{0}; static td::NamedThreadSafeCounter::CounterRef get_thread_safe_counter() { static auto res = td::NamedThreadSafeCounter::get_default().get_counter("DynamicBagOfCellsDb"); @@ -443,7 +452,8 @@ class DynamicBagOfCellsDbImpl : public DynamicBagOfCellsDb, private ExtCellCreat guard.dismiss(); } else { auto loaded_cell = info.cell->load_cell().move_as_ok(); - storer.set(info.db_refcnt, *loaded_cell.data_cell); + storer.set(info.db_refcnt, loaded_cell.data_cell, + loaded_cell.data_cell->get_depth() == celldb_compress_depth_ && celldb_compress_depth_ != 0); info.in_db = true; } } diff --git a/crypto/vm/db/DynamicBagOfCellsDb.h b/crypto/vm/db/DynamicBagOfCellsDb.h index 3569208c..fa2b44d2 100644 --- 
a/crypto/vm/db/DynamicBagOfCellsDb.h +++ b/crypto/vm/db/DynamicBagOfCellsDb.h @@ -64,6 +64,9 @@ class DynamicBagOfCellsDb { // restart with new loader will also reset stats_diff virtual td::Status set_loader(std::unique_ptr loader) = 0; + virtual void set_celldb_compress_depth(td::uint32 value) = 0; + virtual vm::ExtCellCreator& as_ext_cell_creator() = 0; + static std::unique_ptr create(); class AsyncExecutor { diff --git a/crypto/vm/dict.cpp b/crypto/vm/dict.cpp index ac32b38f..c79924d0 100644 --- a/crypto/vm/dict.cpp +++ b/crypto/vm/dict.cpp @@ -21,6 +21,7 @@ #include "vm/cellslice.h" #include "vm/stack.hpp" #include "common/bitstring.h" +#include "td/utils/Random.h" #include "td/utils/bits.h" @@ -2007,7 +2008,7 @@ bool DictionaryFixed::combine_with(DictionaryFixed& dict2) { bool DictionaryFixed::dict_check_for_each(Ref dict, td::BitPtr key_buffer, int n, int total_key_len, const DictionaryFixed::foreach_func_t& foreach_func, - bool invert_first) const { + bool invert_first, bool shuffle) const { if (dict.is_null()) { return true; } @@ -2026,26 +2027,29 @@ bool DictionaryFixed::dict_check_for_each(Ref dict, td::BitPtr key_buffer, key_buffer += l + 1; if (l) { invert_first = false; - } else if (invert_first) { + } + bool invert = shuffle ? td::Random::fast(0, 1) == 1: invert_first; + if (invert) { std::swap(c1, c2); } - key_buffer[-1] = invert_first; + key_buffer[-1] = invert; // recursive check_foreach applied to both children - if (!dict_check_for_each(std::move(c1), key_buffer, n - l - 1, total_key_len, foreach_func)) { + if (!dict_check_for_each(std::move(c1), key_buffer, n - l - 1, total_key_len, foreach_func, false, shuffle)) { return false; } - key_buffer[-1] = !invert_first; - return dict_check_for_each(std::move(c2), key_buffer, n - l - 1, total_key_len, foreach_func); + key_buffer[-1] = !invert; + return dict_check_for_each(std::move(c2), key_buffer, n - l - 1, total_key_len, foreach_func, false, shuffle); } -bool DictionaryFixed::check_for_each(const foreach_func_t& foreach_func, bool invert_first) { +bool DictionaryFixed::check_for_each(const foreach_func_t& foreach_func, bool invert_first, bool shuffle) { force_validate(); if (is_empty()) { return true; } int key_len = get_key_bits(); unsigned char key_buffer[max_key_bytes]; - return dict_check_for_each(get_root_cell(), td::BitPtr{key_buffer}, key_len, key_len, foreach_func, invert_first); + return dict_check_for_each(get_root_cell(), td::BitPtr{key_buffer}, key_len, key_len, foreach_func, invert_first, + shuffle); } static inline bool set_bit(td::BitPtr ptr, bool value = true) { diff --git a/crypto/vm/dict.h b/crypto/vm/dict.h index 978f4d53..c4044963 100644 --- a/crypto/vm/dict.h +++ b/crypto/vm/dict.h @@ -223,7 +223,7 @@ class DictionaryFixed : public DictionaryBase { int get_common_prefix(td::BitPtr buffer, unsigned buffer_len); bool cut_prefix_subdict(td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false); Ref extract_prefix_subdict_root(td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false); - bool check_for_each(const foreach_func_t& foreach_func, bool invert_first = false); + bool check_for_each(const foreach_func_t& foreach_func, bool invert_first = false, bool shuffle = false); int filter(filter_func_t check); bool combine_with(DictionaryFixed& dict2, const combine_func_t& combine_func, int mode = 0); bool combine_with(DictionaryFixed& dict2, const simple_combine_func_t& simple_combine_func, int mode = 0); @@ -292,7 +292,7 @@ class DictionaryFixed : public DictionaryBase { std::pair, bool> 
extract_prefix_subdict_internal(Ref dict, td::ConstBitPtr prefix, int prefix_len, bool remove_prefix = false) const; bool dict_check_for_each(Ref dict, td::BitPtr key_buffer, int n, int total_key_len, - const foreach_func_t& foreach_func, bool invert_first = false) const; + const foreach_func_t& foreach_func, bool invert_first = false, bool shuffle = false) const; std::pair, int> dict_filter(Ref dict, td::BitPtr key, int n, const filter_func_t& check_leaf, int& skip_rest) const; Ref dict_combine_with(Ref dict1, Ref dict2, td::BitPtr key_buffer, int n, int total_key_len, diff --git a/crypto/vm/tonops.cpp b/crypto/vm/tonops.cpp index 0dd15d80..9ce7fe9c 100644 --- a/crypto/vm/tonops.cpp +++ b/crypto/vm/tonops.cpp @@ -35,6 +35,7 @@ #include "openssl/digest.hpp" #include #include "bls.h" +#include "mc-config.h" namespace vm { @@ -67,6 +68,10 @@ int exec_set_gas_generic(VmState* st, long long new_gas_limit) { throw VmNoGas{}; } st->change_gas_limit(new_gas_limit); + if (st->get_stop_on_accept_message()) { + VM_LOG(st) << "External message is accepted, stopping TVM"; + return st->jump(td::Ref{true, 0}); + } return 0; } @@ -101,7 +106,7 @@ void register_basic_gas_ops(OpcodeTable& cp0) { using namespace std::placeholders; cp0.insert(OpcodeInstr::mksimple(0xf800, 16, "ACCEPT", exec_accept)) .insert(OpcodeInstr::mksimple(0xf801, 16, "SETGASLIMIT", exec_set_gas_limit)) - .insert(OpcodeInstr::mksimple(0xf802, 16, "GASCONSUMED", exec_gas_consumed)->require_version(4)) + .insert(OpcodeInstr::mksimple(0xf807, 16, "GASCONSUMED", exec_gas_consumed)->require_version(4)) .insert(OpcodeInstr::mksimple(0xf80f, 16, "COMMIT", exec_commit)); } @@ -118,6 +123,20 @@ static const StackEntry& get_param(VmState* st, unsigned idx) { return tuple_index(t1, idx); } +// ConfigParams: 18 (only one entry), 19, 20, 21, 24, 25, 43 +static td::Ref get_unpacked_config_param(VmState* st, unsigned idx) { + auto tuple = st->get_c7(); + auto t1 = tuple_index(tuple, 0).as_tuple_range(255); + if (t1.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a tuple"}; + } + auto t2 = tuple_index(t1, 14).as_tuple_range(255); + if (t2.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a tuple"}; + } + return tuple_index(t2, idx).as_slice(); +} + int exec_get_param(VmState* st, unsigned idx, const char* name) { if (name) { VM_LOG(st) << "execute " << name; @@ -228,20 +247,150 @@ int exec_get_prev_blocks_info(VmState* st, unsigned idx, const char* name) { } int exec_get_global_id(VmState* st) { - Ref config = get_param(st, 9).as_cell(); - if (config.is_null()) { - throw VmError{Excno::type_chk, "intermediate value is not a cell"}; + VM_LOG(st) << "execute GLOBALID"; + if (st->get_global_version() >= 6) { + Ref cs = get_unpacked_config_param(st, 1); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; + } + if (cs->size() < 32) { + throw VmError{Excno::cell_und, "invalid global-id config"}; + } + st->get_stack().push_smallint(cs->prefetch_long(32)); + } else { + Ref config = get_param(st, 19).as_cell(); + if (config.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a cell"}; + } + Dictionary config_dict{std::move(config), 32}; + Ref cell = config_dict.lookup_ref(td::BitArray<32>{19}); + if (cell.is_null()) { + throw VmError{Excno::unknown, "invalid global-id config"}; + } + CellSlice cs = load_cell_slice(cell); + if (cs.size() < 32) { + throw VmError{Excno::unknown, "invalid global-id config"}; + } + 
st->get_stack().push_smallint(cs.fetch_long(32)); } - Dictionary config_dict{std::move(config), 32}; - Ref cell = config_dict.lookup_ref(td::BitArray<32>{19}); - if (cell.is_null()) { - throw VmError{Excno::unknown, "invalid global-id config"}; + return 0; +} + +static block::GasLimitsPrices get_gas_prices(VmState* st, bool is_masterchain) { + Ref cs = get_unpacked_config_param(st, is_masterchain ? 2 : 3); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; } - CellSlice cs = load_cell_slice(cell); - if (cs.size() < 32) { - throw VmError{Excno::unknown, "invalid global-id config"}; + auto r_prices = block::Config::do_get_gas_limits_prices(*cs, is_masterchain ? 20 : 21); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; } - st->get_stack().push_smallint(cs.fetch_long(32)); + return r_prices.move_as_ok(); +} + +static block::MsgPrices get_msg_prices(VmState* st, bool is_masterchain) { + Ref cs = get_unpacked_config_param(st, is_masterchain ? 4 : 5); + if (cs.is_null()) { + throw VmError{Excno::type_chk, "intermediate value is not a slice"}; + } + auto r_prices = block::Config::do_get_msg_prices(*cs, is_masterchain ? 24 : 25); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + return r_prices.move_as_ok(); +} + +int exec_get_gas_fee(VmState* st) { + VM_LOG(st) << "execute GETGASFEE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 gas = stack.pop_long_range(std::numeric_limits::max(), 0); + block::GasLimitsPrices prices = get_gas_prices(st, is_masterchain); + stack.push_int(prices.compute_gas_price(gas)); + return 0; +} + +int exec_get_storage_fee(VmState* st) { + VM_LOG(st) << "execute GETSTORAGEFEE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::int64 delta = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); + Ref cs = get_unpacked_config_param(st, 0); + if (cs.is_null()) { + // null means tat no StoragePrices is active, so the price is 0 + stack.push_smallint(0); + return 0; + } + auto r_prices = block::Config::do_get_one_storage_prices(*cs); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + block::StoragePrices prices = r_prices.move_as_ok(); + td::RefInt256 total; + if (is_masterchain) { + total = td::make_refint(cells) * prices.mc_cell_price; + total += td::make_refint(bits) * prices.mc_bit_price; + } else { + total = td::make_refint(cells) * prices.cell_price; + total += td::make_refint(bits) * prices.bit_price; + } + total *= delta; + stack.push_int(td::rshift(total, 16, 1)); + return 0; +} + +int exec_get_forward_fee(VmState* st) { + VM_LOG(st) << "execute GETFORWARDFEE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); + block::MsgPrices prices = get_msg_prices(st, is_masterchain); + stack.push_int(prices.compute_fwd_fees256(cells, bits)); + return 0; +} + +int exec_get_precompiled_gas(VmState* st) { + VM_LOG(st) << "execute GETPRECOMPILEDGAS"; + Stack& stack = st->get_stack(); + stack.push_null(); + 
return 0; +} + +int exec_get_original_fwd_fee(VmState* st) { + VM_LOG(st) << "execute GETORIGINALFWDFEE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::RefInt256 fwd_fee = stack.pop_int_finite(); + if (fwd_fee->sgn() < 0) { + throw VmError{Excno::range_chk, "fwd_fee is negative"}; + } + block::MsgPrices prices = get_msg_prices(st, is_masterchain); + stack.push_int(td::muldiv(fwd_fee, td::make_refint(1 << 16), td::make_refint((1 << 16) - prices.first_frac))); + return 0; +} + +int exec_get_gas_fee_simple(VmState* st) { + VM_LOG(st) << "execute GETGASFEESIMPLE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 gas = stack.pop_long_range(std::numeric_limits::max(), 0); + block::GasLimitsPrices prices = get_gas_prices(st, is_masterchain); + stack.push_int(td::rshift(td::make_refint(prices.gas_price) * gas, 16, 1)); + return 0; +} + +int exec_get_forward_fee_simple(VmState* st) { + VM_LOG(st) << "execute GETFORWARDFEESIMPLE"; + Stack& stack = st->get_stack(); + bool is_masterchain = stack.pop_bool(); + td::uint64 bits = stack.pop_long_range(std::numeric_limits::max(), 0); + td::uint64 cells = stack.pop_long_range(std::numeric_limits::max(), 0); + block::MsgPrices prices = get_msg_prices(st, is_masterchain); + stack.push_int(td::rshift(td::make_refint(prices.bit_price) * bits + td::make_refint(prices.cell_price) * cells, 16, + 1)); // divide by 2^16 with ceil rounding return 0; } @@ -259,13 +408,21 @@ void register_ton_config_ops(OpcodeTable& cp0) { .insert(OpcodeInstr::mksimple(0xf82b, 16, "INCOMINGVALUE", std::bind(exec_get_param, _1, 11, "INCOMINGVALUE"))) .insert(OpcodeInstr::mksimple(0xf82c, 16, "STORAGEFEES", std::bind(exec_get_param, _1, 12, "STORAGEFEES"))) .insert(OpcodeInstr::mksimple(0xf82d, 16, "PREVBLOCKSINFOTUPLE", std::bind(exec_get_param, _1, 13, "PREVBLOCKSINFOTUPLE"))) - .insert(OpcodeInstr::mkfixedrange(0xf82e, 0xf830, 16, 4, instr::dump_1c("GETPARAM "), exec_get_var_param)) + .insert(OpcodeInstr::mksimple(0xf82e, 16, "UNPACKEDCONFIGTUPLE", std::bind(exec_get_param, _1, 14, "UNPACKEDCONFIGTUPLE"))) + .insert(OpcodeInstr::mksimple(0xf82f, 16, "DUEPAYMENT", std::bind(exec_get_param, _1, 15, "DUEPAYMENT"))) .insert(OpcodeInstr::mksimple(0xf830, 16, "CONFIGDICT", exec_get_config_dict)) .insert(OpcodeInstr::mksimple(0xf832, 16, "CONFIGPARAM", std::bind(exec_get_config_param, _1, false))) .insert(OpcodeInstr::mksimple(0xf833, 16, "CONFIGOPTPARAM", std::bind(exec_get_config_param, _1, true))) .insert(OpcodeInstr::mksimple(0xf83400, 24, "PREVMCBLOCKS", std::bind(exec_get_prev_blocks_info, _1, 0, "PREVMCBLOCKS"))->require_version(4)) .insert(OpcodeInstr::mksimple(0xf83401, 24, "PREVKEYBLOCK", std::bind(exec_get_prev_blocks_info, _1, 1, "PREVKEYBLOCK"))->require_version(4)) .insert(OpcodeInstr::mksimple(0xf835, 16, "GLOBALID", exec_get_global_id)->require_version(4)) + .insert(OpcodeInstr::mksimple(0xf836, 16, "GETGASFEE", exec_get_gas_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf837, 16, "GETSTORAGEFEE", exec_get_storage_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf838, 16, "GETFORWARDFEE", exec_get_forward_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf839, 16, "GETPRECOMPILEDGAS", exec_get_precompiled_gas)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf83a, 16, "GETORIGINALFWDFEE", exec_get_original_fwd_fee)->require_version(6)) + .insert(OpcodeInstr::mksimple(0xf83b, 16, "GETGASFEESIMPLE", exec_get_gas_fee_simple)->require_version(6)) + 
.insert(OpcodeInstr::mksimple(0xf83c, 16, "GETFORWARDFEESIMPLE", exec_get_forward_fee_simple)->require_version(6)) .insert(OpcodeInstr::mksimple(0xf840, 16, "GETGLOBVAR", exec_get_global_var)) .insert(OpcodeInstr::mkfixedrange(0xf841, 0xf860, 16, 5, instr::dump_1c_and(31, "GETGLOB "), exec_get_global)) .insert(OpcodeInstr::mksimple(0xf860, 16, "SETGLOBVAR", exec_set_global_var)) @@ -620,7 +777,6 @@ int exec_ristretto255_from_hash(VmState* st) { if (!x2->export_bytes(xb + 32, 32, false)) { throw VmError{Excno::range_chk, "x2 must fit in an unsigned 256-bit integer"}; } - CHECK(sodium_init() >= 0); crypto_core_ristretto255_from_hash(rb, xb); td::RefInt256 r{true}; CHECK(r.write().import_bytes(rb, 32, false)); @@ -633,8 +789,7 @@ int exec_ristretto255_validate(VmState* st, bool quiet) { Stack& stack = st->get_stack(); auto x = stack.pop_int(); st->consume_gas(VmState::rist255_validate_gas_price); - unsigned char xb[64]; - CHECK(sodium_init() >= 0); + unsigned char xb[32]; if (!x->export_bytes(xb, 32, false) || !crypto_core_ristretto255_is_valid_point(xb)) { if (quiet) { stack.push_bool(false); @@ -656,7 +811,6 @@ int exec_ristretto255_add(VmState* st, bool quiet) { auto x = stack.pop_int(); st->consume_gas(VmState::rist255_add_gas_price); unsigned char xb[32], yb[32], rb[32]; - CHECK(sodium_init() >= 0); if (!x->export_bytes(xb, 32, false) || !y->export_bytes(yb, 32, false) || crypto_core_ristretto255_add(rb, xb, yb)) { if (quiet) { stack.push_bool(false); @@ -681,7 +835,6 @@ int exec_ristretto255_sub(VmState* st, bool quiet) { auto x = stack.pop_int(); st->consume_gas(VmState::rist255_add_gas_price); unsigned char xb[32], yb[32], rb[32]; - CHECK(sodium_init() >= 0); if (!x->export_bytes(xb, 32, false) || !y->export_bytes(yb, 32, false) || crypto_core_ristretto255_sub(rb, xb, yb)) { if (quiet) { stack.push_bool(false); @@ -719,17 +872,20 @@ int exec_ristretto255_mul(VmState* st, bool quiet) { auto n = stack.pop_int() % get_ristretto256_l(); auto x = stack.pop_int(); st->consume_gas(VmState::rist255_mul_gas_price); - unsigned char xb[32], nb[32], rb[32]; - memset(rb, 255, sizeof(rb)); - CHECK(sodium_init() >= 0); - if (!x->export_bytes(xb, 32, false) || !export_bytes_little(n, nb) || crypto_scalarmult_ristretto255(rb, nb, xb)) { - if (std::all_of(rb, rb + 32, [](unsigned char c) { return c == 255; })) { - if (quiet) { - stack.push_bool(false); - return 0; - } - throw VmError{Excno::range_chk, "invalid x or n"}; + if (n->sgn() == 0) { + stack.push_smallint(0); + if (quiet) { + stack.push_bool(true); } + return 0; + } + unsigned char xb[32], nb[32], rb[32]; + if (!x->export_bytes(xb, 32, false) || !export_bytes_little(n, nb) || crypto_scalarmult_ristretto255(rb, nb, xb)) { + if (quiet) { + stack.push_bool(false); + return 0; + } + throw VmError{Excno::range_chk, "invalid x or n"}; } td::RefInt256 r{true}; CHECK(r.write().import_bytes(rb, 32, false)); @@ -747,7 +903,6 @@ int exec_ristretto255_mul_base(VmState* st, bool quiet) { st->consume_gas(VmState::rist255_mulbase_gas_price); unsigned char nb[32], rb[32]; memset(rb, 255, sizeof(rb)); - CHECK(sodium_init() >= 0); if (!export_bytes_little(n, nb) || crypto_scalarmult_ristretto255_base(rb, nb)) { if (std::all_of(rb, rb + 32, [](unsigned char c) { return c == 255; })) { if (quiet) { @@ -833,7 +988,7 @@ int exec_bls_verify(VmState* st) { VM_LOG(st) << "execute BLS_VERIFY"; Stack& stack = st->get_stack(); stack.check_underflow(3); - st->consume_gas(st->bls_verify_gas_price); + st->consume_gas(VmState::bls_verify_gas_price); bls::P2 sig = 
slice_to_bls_p2(*stack.pop_cellslice()); td::BufferSlice msg = slice_to_bls_msg(*stack.pop_cellslice()); bls::P1 pub = slice_to_bls_p1(*stack.pop_cellslice()); @@ -845,8 +1000,7 @@ int exec_bls_aggregate(VmState* st) { VM_LOG(st) << "execute BLS_AGGREGATE"; Stack& stack = st->get_stack(); int n = stack.pop_smallint_range(stack.depth() - 1, 1); - st->consume_gas( - std::max(0LL, VmState::bls_aggregate_base_gas_price + (long long)n * VmState::bls_aggregate_element_gas_price)); + st->consume_gas(VmState::bls_aggregate_base_gas_price + (long long)n * VmState::bls_aggregate_element_gas_price); std::vector sigs(n); for (int i = n - 1; i >= 0; --i) { sigs[i] = slice_to_bls_p2(*stack.pop_cellslice()); @@ -1591,17 +1745,39 @@ int exec_send_message(VmState* st) { } bool is_masterchain = parse_addr_workchain(*my_addr) == -1 || (!ext_msg && parse_addr_workchain(*dest) == -1); - Ref config_dict = get_param(st, 9).as_cell(); - Dictionary config{config_dict, 32}; - Ref prices_cell = config.lookup_ref(td::BitArray<32>{is_masterchain ? 24 : 25}); - block::gen::MsgForwardPrices::Record prices; - if (prices_cell.is_null() || !tlb::unpack_cell(std::move(prices_cell), prices)) { + td::Ref prices_cs; + if (st->get_global_version() >= 6) { + prices_cs = get_unpacked_config_param(st, is_masterchain ? 4 : 5); + } else { + Ref config_dict = get_param(st, 9).as_cell(); + Dictionary config{config_dict, 32}; + Ref prices_cell = config.lookup_ref(td::BitArray<32>{is_masterchain ? 24 : 25}); + if (prices_cell.not_null()) { + prices_cs = load_cell_slice_ref(prices_cell); + } + } + if (prices_cs.is_null()) { throw VmError{Excno::unknown, "invalid prices config"}; } + auto r_prices = block::Config::do_get_msg_prices(*prices_cs, is_masterchain ? 24 : 25); + if (r_prices.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_prices.error().message()}; + } + block::MsgPrices prices = r_prices.move_as_ok(); // msg_fwd_fees = (lump_price + ceil((bit_price * msg.bits + cell_price * msg.cells)/2^16)) nanograms // bits in the root cell of a message are not included in msg.bits (lump_price pays for them) - vm::VmStorageStat stat(1 << 13); + td::uint64 max_cells; + if (st->get_global_version() >= 6) { + auto r_size_limits_config = block::Config::do_get_size_limits_config(get_unpacked_config_param(st, 6)); + if (r_size_limits_config.is_error()) { + throw VmError{Excno::cell_und, PSTRING() << "cannot parse config: " << r_size_limits_config.error().message()}; + } + max_cells = r_size_limits_config.ok().max_msg_cells; + } else { + max_cells = 1 << 13; + } + vm::VmStorageStat stat(max_cells); CellSlice cs = load_cell_slice(msg_cell); cs.skip_first(cs.size()); stat.add_storage(cs); @@ -1649,7 +1825,7 @@ int exec_send_message(VmState* st) { if (ihr_disabled) { ihr_fee_short = 0; } else { - ihr_fee_short = td::uint128(fwd_fee_short).mult(prices.ihr_price_factor).shr(16).lo(); + ihr_fee_short = td::uint128(fwd_fee_short).mult(prices.ihr_factor).shr(16).lo(); } fwd_fee = td::RefInt256{true, fwd_fee_short}; ihr_fee = td::RefInt256{true, ihr_fee_short}; diff --git a/crypto/vm/vm.cpp b/crypto/vm/vm.cpp index 4d3e67af..3f595a00 100644 --- a/crypto/vm/vm.cpp +++ b/crypto/vm/vm.cpp @@ -21,6 +21,8 @@ #include "vm/dict.h" #include "vm/log.h" #include "vm/vm.h" +#include "cp0.h" +#include namespace vm { @@ -770,4 +772,15 @@ void VmState::restore_parent_vm(int res) { } } +td::Status init_vm(bool enable_debug) { + if (!init_op_cp0(enable_debug)) { + return td::Status::Error("Failed to init TVM: failed to init cp0"); + 
} + auto code = sodium_init(); + if (code < 0) { + return td::Status::Error(PSTRING() << "Failed to init TVM: sodium_init, code=" << code); + } + return td::Status::OK(); +} + } // namespace vm diff --git a/crypto/vm/vm.h b/crypto/vm/vm.h index e3fda318..e5cca026 100644 --- a/crypto/vm/vm.h +++ b/crypto/vm/vm.h @@ -98,6 +98,7 @@ class VmState final : public VmStateInterface { td::HashSet loaded_cells; int stack_trace{0}, debug_off{0}; bool chksig_always_succeed{false}; + bool stop_on_accept_message{false}; td::optional missing_library; td::uint16 max_data_depth = 512; // Default value int global_version{0}; @@ -339,7 +340,7 @@ class VmState final : public VmStateInterface { void preclear_cr(const ControlRegs& save) { cr &= save; } - int get_global_version() const { + int get_global_version() const override { return global_version; } void set_global_version(int version) { @@ -381,6 +382,12 @@ class VmState final : public VmStateInterface { bool get_chksig_always_succeed() const { return chksig_always_succeed; } + void set_stop_on_accept_message(bool flag) { + stop_on_accept_message = flag; + } + bool get_stop_on_accept_message() const { + return stop_on_accept_message; + } Ref ref_to_cont(Ref cell) const { return td::make_ref(load_cell_slice_ref(std::move(cell)), get_cp()); } @@ -423,4 +430,6 @@ int run_vm_code(Ref _code, Stack& _stack, int flags = 0, Ref* d Ref lookup_library_in(td::ConstBitPtr key, Ref lib_root); +td::Status init_vm(bool enable_debug = false); + } // namespace vm diff --git a/crypto/vm/vmstate.h b/crypto/vm/vmstate.h index a81a4e78..0d6c3fdf 100644 --- a/crypto/vm/vmstate.h +++ b/crypto/vm/vmstate.h @@ -19,6 +19,7 @@ #pragma once #include "common/refcnt.hpp" #include "vm/cells.h" +#include "common/global-version.h" #include "td/utils/Context.h" @@ -38,6 +39,9 @@ class VmStateInterface : public td::Context { virtual bool register_op(int op_units = 1) { return true; }; + virtual int get_global_version() const { + return ton::SUPPORTED_VERSION; + } }; } // namespace vm diff --git a/dht-server/CMakeLists.txt b/dht-server/CMakeLists.txt index 889b3f30..6daac033 100644 --- a/dht-server/CMakeLists.txt +++ b/dht-server/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/dht-server/dht-server.cpp b/dht-server/dht-server.cpp index 44228bf3..49d2bfbc 100644 --- a/dht-server/dht-server.cpp +++ b/dht-server/dht-server.cpp @@ -574,6 +574,12 @@ void DhtServer::load_config(td::Promise promise) { config_file_ = db_root_ + "/config.json"; } auto conf_data_R = td::read_file(config_file_); + if (conf_data_R.is_error()) { + conf_data_R = td::read_file(temp_config_file()); + if (conf_data_R.is_ok()) { + td::rename(temp_config_file(), config_file_).ensure(); + } + } if (conf_data_R.is_error()) { auto P = td::PromiseCreator::lambda( [name = local_config_, new_name = config_file_, promise = std::move(promise)](td::Result R) { @@ -622,12 +628,15 @@ void DhtServer::load_config(td::Promise promise) { void DhtServer::write_config(td::Promise promise) { auto s = td::json_encode(td::ToJson(*config_.tl().get()), true); - auto S = td::write_file(config_file_, s); - if (S.is_ok()) { - promise.set_value(td::Unit()); - } else { + auto S = td::write_file(temp_config_file(), s); + if (S.is_error()) { + td::unlink(temp_config_file()).ignore(); promise.set_error(std::move(S)); + return; } + td::unlink(config_file_).ignore(); + TRY_STATUS_PROMISE(promise, 
td::rename(temp_config_file(), config_file_)); + promise.set_value(td::Unit()); } td::Promise DhtServer::get_key_promise(td::MultiPromise::InitGuard &ig) { diff --git a/dht-server/dht-server.hpp b/dht-server/dht-server.hpp index bf815202..7c9e5619 100644 --- a/dht-server/dht-server.hpp +++ b/dht-server/dht-server.hpp @@ -109,6 +109,9 @@ class DhtServer : public td::actor::Actor { std::string local_config_ = ""; std::string global_config_ = "ton-global.config"; std::string config_file_; + std::string temp_config_file() const { + return config_file_ + ".tmp"; + } std::string db_root_ = "/var/ton-work/db/"; diff --git a/dht/CMakeLists.txt b/dht/CMakeLists.txt index e50a7497..95ee7069 100644 --- a/dht/CMakeLists.txt +++ b/dht/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/dht/dht-in.hpp b/dht/dht-in.hpp index 59ce2184..c2d20455 100644 --- a/dht/dht-in.hpp +++ b/dht/dht-in.hpp @@ -155,10 +155,7 @@ class DhtMemberImpl : public DhtMember { } } - void add_full_node(DhtKeyId id, DhtNode node) override { - add_full_node_impl(id, std::move(node)); - } - void add_full_node_impl(DhtKeyId id, DhtNode node, bool set_active = false); + void add_full_node(DhtKeyId id, DhtNode node, bool set_active) override; adnl::AdnlNodeIdShort get_id() const override { return id_; diff --git a/dht/dht-query.cpp b/dht/dht-query.cpp index bc61242d..b84ef8c3 100644 --- a/dht/dht-query.cpp +++ b/dht/dht-query.cpp @@ -34,24 +34,33 @@ namespace ton { namespace dht { void DhtQuery::send_queries() { + while (pending_queries_.size() > k_ * 2) { + pending_queries_.erase(--pending_queries_.end()); + } VLOG(DHT_EXTRA_DEBUG) << this << ": sending new queries. active=" << active_queries_ << " max_active=" << a_; - while (pending_ids_.size() > 0 && active_queries_ < a_) { + while (pending_queries_.size() > 0 && active_queries_ < a_) { + auto id_xor = *pending_queries_.begin(); + if (result_list_.size() == k_ && *result_list_.rbegin() < id_xor) { + break; + } active_queries_++; - auto id_xor = *pending_ids_.begin(); auto id = id_xor ^ key_; VLOG(DHT_EXTRA_DEBUG) << this << ": sending " << get_name() << " query to " << id; - pending_ids_.erase(id_xor); + pending_queries_.erase(id_xor); - auto it = list_.find(id_xor); - CHECK(it != list_.end()); - td::actor::send_closure(adnl_, &adnl::Adnl::add_peer, get_src(), it->second.adnl_id(), it->second.addr_list()); + auto it = nodes_.find(id_xor); + CHECK(it != nodes_.end()); + td::actor::send_closure(adnl_, &adnl::Adnl::add_peer, get_src(), it->second.node.adnl_id(), + it->second.node.addr_list()); send_one_query(id.to_adnl()); } if (active_queries_ == 0) { - CHECK(pending_ids_.size() == 0); + pending_queries_.clear(); DhtNodesList list; - for (auto &node : list_) { - list.push_back(std::move(node.second)); + for (auto id_xor : result_list_) { + auto it = nodes_.find(id_xor); + CHECK(it != nodes_.end()); + list.push_back(it->second.node.clone()); } CHECK(list.size() <= k_); VLOG(DHT_EXTRA_DEBUG) << this << ": finalizing " << get_name() << " query. 
List size=" << list.size(); @@ -65,30 +74,32 @@ void DhtQuery::add_nodes(DhtNodesList list) { for (auto &node : list.list()) { auto id = node.get_key(); auto id_xor = key_ ^ id; - if (list_.find(id_xor) != list_.end()) { + if (nodes_.find(id_xor) != nodes_.end()) { continue; } - td::actor::send_closure(node_, &DhtMember::add_full_node, id, node.clone()); + VLOG(DHT_EXTRA_DEBUG) << this << ": " << get_name() << " query: adding " << id << " key"; + td::actor::send_closure(node_, &DhtMember::add_full_node, id, node.clone(), false); + nodes_[id_xor].node = std::move(node); + pending_queries_.insert(id_xor); + } +} - DhtKeyId last_id_xor; - if (list_.size() > 0) { - last_id_xor = list_.rbegin()->first; +void DhtQuery::finish_query(adnl::AdnlNodeIdShort id, bool success) { + active_queries_--; + CHECK(active_queries_ <= k_); + auto id_xor = key_ ^ DhtKeyId(id); + if (success) { + result_list_.insert(id_xor); + if (result_list_.size() > k_) { + result_list_.erase(--result_list_.end()); } - - if (list_.size() < k_ || id_xor < last_id_xor) { - list_[id_xor] = std::move(node); - pending_ids_.insert(id_xor); - if (list_.size() > k_) { - CHECK(id_xor != last_id_xor); - VLOG(DHT_EXTRA_DEBUG) << this << ": " << get_name() << " query: replacing " << (last_id_xor ^ key_) - << " key with " << id; - pending_ids_.erase(last_id_xor); - list_.erase(last_id_xor); - } else { - VLOG(DHT_EXTRA_DEBUG) << this << ": " << get_name() << " query: adding " << id << " key"; - } + } else { + NodeInfo &info = nodes_[id_xor]; + if (++info.failed_attempts < MAX_ATTEMPTS) { + pending_queries_.insert(id_xor); } } + send_queries(); } void DhtQueryFindNodes::send_one_query(adnl::AdnlNodeIdShort id) { @@ -111,7 +122,7 @@ void DhtQueryFindNodes::send_one_query(adnl::AdnlNodeIdShort id) { void DhtQueryFindNodes::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find nodes query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } @@ -122,7 +133,7 @@ void DhtQueryFindNodes::on_result(td::Result R, adnl::AdnlNodeI } else { add_nodes(DhtNodesList{Res.move_as_ok(), our_network_id()}); } - finish_query(); + finish_query(dst); } void DhtQueryFindNodes::finish(DhtNodesList list) { @@ -166,14 +177,14 @@ void DhtQueryFindValue::send_one_query_nodes(adnl::AdnlNodeIdShort id) { void DhtQueryFindValue::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find value query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto Res = fetch_tl_object(R.move_as_ok(), true); if (Res.is_error()) { VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.findValue query from " << dst << ": " << Res.move_as_error(); - finish_query(); + finish_query(dst, false); return; } @@ -210,26 +221,26 @@ void DhtQueryFindValue::on_result(td::Result R, adnl::AdnlNodeI } else if (send_get_nodes) { send_one_query_nodes(dst); } else { - finish_query(); + finish_query(dst); } } void DhtQueryFindValue::on_result_nodes(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed find nodes query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto Res = fetch_tl_object(R.move_as_ok(), true); if (Res.is_error()) { VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.findNodes query from " << dst << ": " << 
Res.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto r = Res.move_as_ok(); add_nodes(DhtNodesList{create_tl_object(std::move(r->nodes_)), our_network_id()}); - finish_query(); + finish_query(dst); } void DhtQueryFindValue::finish(DhtNodesList list) { @@ -422,14 +433,14 @@ void DhtQueryRequestReversePing::send_one_query(adnl::AdnlNodeIdShort id) { void DhtQueryRequestReversePing::on_result(td::Result R, adnl::AdnlNodeIdShort dst) { if (R.is_error()) { VLOG(DHT_INFO) << this << ": failed reverse ping query " << get_src() << "->" << dst << ": " << R.move_as_error(); - finish_query(); + finish_query(dst, false); return; } auto Res = fetch_tl_object(R.move_as_ok(), true); if (Res.is_error()) { VLOG(DHT_WARNING) << this << ": dropping incorrect answer on dht.requestReversePing query from " << dst << ": " << Res.move_as_error(); - finish_query(); + finish_query(dst, false); return; } @@ -441,7 +452,7 @@ void DhtQueryRequestReversePing::on_result(td::Result R, adnl:: }, [&](ton_api::dht_clientNotFound &v) { add_nodes(DhtNodesList{std::move(v.nodes_), our_network_id()}); - finish_query(); + finish_query(dst); })); } diff --git a/dht/dht-query.hpp b/dht/dht-query.hpp index c1db0a0e..e4740361 100644 --- a/dht/dht-query.hpp +++ b/dht/dht-query.hpp @@ -63,11 +63,7 @@ class DhtQuery : public td::actor::Actor { } void send_queries(); void add_nodes(DhtNodesList list); - void finish_query() { - active_queries_--; - CHECK(active_queries_ <= k_); - send_queries(); - } + void finish_query(adnl::AdnlNodeIdShort id, bool success = true); DhtKeyId get_key() const { return key_; } @@ -88,16 +84,22 @@ class DhtQuery : public td::actor::Actor { virtual std::string get_name() const = 0; private: + struct NodeInfo { + DhtNode node; + int failed_attempts = 0; + }; DhtMember::PrintId print_id_; adnl::AdnlNodeIdShort src_; - std::map list_; - std::set pending_ids_; + std::map nodes_; + std::set result_list_, pending_queries_; td::uint32 k_; td::uint32 a_; td::int32 our_network_id_; td::actor::ActorId node_; td::uint32 active_queries_ = 0; + static const int MAX_ATTEMPTS = 1; + protected: td::actor::ActorId adnl_; }; diff --git a/dht/dht.cpp b/dht/dht.cpp index e1e20d45..8d7b02b7 100644 --- a/dht/dht.cpp +++ b/dht/dht.cpp @@ -57,7 +57,7 @@ td::Result> Dht::create(adnl::AdnlNodeIdShort id, std:: for (auto &node : nodes.list()) { auto key = node.get_key(); - td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone()); + td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone(), true); } return std::move(D); } @@ -74,7 +74,7 @@ td::Result> Dht::create_client(adnl::AdnlNodeIdShort id for (auto &node : nodes.list()) { auto key = node.get_key(); - td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone()); + td::actor::send_closure(D, &DhtMember::add_full_node, key, node.clone(), true); } return std::move(D); } @@ -368,7 +368,7 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat auto node = N.move_as_ok(); if (node.adnl_id().compute_short_id() == src) { auto key = node.get_key(); - add_full_node_impl(key, std::move(node), true); + add_full_node(key, std::move(node), true); } else { VLOG(DHT_WARNING) << this << ": dropping bad node: unexpected adnl id"; } @@ -398,7 +398,7 @@ void DhtMemberImpl::receive_query(adnl::AdnlNodeIdShort src, td::BufferSlice dat ton_api::downcast_call(*Q, [&](auto &object) { this->process_query(src, object, std::move(promise)); }); } -void DhtMemberImpl::add_full_node_impl(DhtKeyId key, DhtNode node, 
bool set_active) { +void DhtMemberImpl::add_full_node(DhtKeyId key, DhtNode node, bool set_active) { VLOG(DHT_EXTRA_DEBUG) << this << ": adding full node " << key; auto eid = key ^ key_; @@ -466,7 +466,7 @@ void DhtMemberImpl::set_value(DhtValue value, td::Promise promise) { void DhtMemberImpl::get_value_in(DhtKeyId key, td::Promise result) { auto P = td::PromiseCreator::lambda([key, promise = std::move(result), SelfId = actor_id(this), print_id = print_id(), - adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, a = a_, + adnl = adnl_, list = get_nearest_nodes(key, k_ * 2), k = k_, a = a_, network_id = network_id_, id = id_, client_only = client_only_](td::Result R) mutable { R.ensure(); @@ -485,7 +485,7 @@ void DhtMemberImpl::register_reverse_connection(adnl::AdnlNodeIdFull client, td: auto key_id = get_reverse_connection_key(client_short).compute_key_id(); td::actor::send_closure(keyring_, &keyring::Keyring::sign_message, client_short.pubkey_hash(), register_reverse_connection_to_sign(client_short, id_, ttl), - [=, print_id = print_id(), list = get_nearest_nodes(key_id, k_), SelfId = actor_id(this), + [=, print_id = print_id(), list = get_nearest_nodes(key_id, k_ * 2), SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { TRY_RESULT_PROMISE_PREFIX(promise, signature, std::move(R), "Failed to sign: "); td::actor::send_closure(SelfId, &DhtMemberImpl::get_self_node, @@ -532,7 +532,7 @@ void DhtMemberImpl::request_reverse_ping_cont(adnl::AdnlNode target, td::BufferS } auto key_id = get_reverse_connection_key(client).compute_key_id(); get_self_node([=, target = std::move(target), signature = std::move(signature), promise = std::move(promise), - SelfId = actor_id(this), print_id = print_id(), list = get_nearest_nodes(key_id, k_), + SelfId = actor_id(this), print_id = print_id(), list = get_nearest_nodes(key_id, k_ * 2), client_only = client_only_](td::Result R) mutable { R.ensure(); td::actor::create_actor( @@ -651,8 +651,8 @@ void DhtMemberImpl::check() { DhtKeyId key{x}; auto P = td::PromiseCreator::lambda([key, promise = std::move(promise), SelfId = actor_id(this), - print_id = print_id(), adnl = adnl_, list = get_nearest_nodes(key, k_), k = k_, - a = a_, network_id = network_id_, id = id_, + print_id = print_id(), adnl = adnl_, list = get_nearest_nodes(key, k_ * 2), + k = k_, a = a_, network_id = network_id_, id = id_, client_only = client_only_](td::Result R) mutable { R.ensure(); td::actor::create_actor("FindNodesQuery", key, print_id, id, std::move(list), k, a, network_id, @@ -677,8 +677,8 @@ void DhtMemberImpl::send_store(DhtValue value, td::Promise promise) { auto key_id = value.key_id(); auto P = td::PromiseCreator::lambda([value = std::move(value), print_id = print_id(), id = id_, - client_only = client_only_, list = get_nearest_nodes(key_id, k_), k = k_, a = a_, - network_id = network_id_, SelfId = actor_id(this), adnl = adnl_, + client_only = client_only_, list = get_nearest_nodes(key_id, k_ * 2), k = k_, + a = a_, network_id = network_id_, SelfId = actor_id(this), adnl = adnl_, promise = std::move(promise)](td::Result R) mutable { R.ensure(); td::actor::create_actor("StoreQuery", std::move(value), print_id, id, std::move(list), k, a, diff --git a/dht/dht.hpp b/dht/dht.hpp index 0b46d635..9fb05e08 100644 --- a/dht/dht.hpp +++ b/dht/dht.hpp @@ -95,7 +95,7 @@ class DhtMember : public Dht { //virtual void update_addr_list(tl_object_ptr addr_list) = 0; //virtual void add_node(adnl::AdnlNodeIdShort id) = 0; - virtual void add_full_node(DhtKeyId id, DhtNode 
node) = 0; + virtual void add_full_node(DhtKeyId id, DhtNode node, bool set_active) = 0; virtual void receive_ping(DhtKeyId id, DhtNode result) = 0; diff --git a/doc/GlobalVersions.md b/doc/GlobalVersions.md index ccfca940..7e3b4ce9 100644 --- a/doc/GlobalVersions.md +++ b/doc/GlobalVersions.md @@ -36,4 +36,63 @@ intermediate value before division (e.g. `(xy+w)/z`). * Flag +16 in actions "Send message", "Reserve", "Change library" causes bounce if action fails. ### Storage phase -* Unpaid storage fee is now saved to `due_payment` \ No newline at end of file +* Unpaid storage fee is now saved to `due_payment` + +## Version 5 + +### Gas limits +Version 5 enables higher gas limits for special contracts. + +* Gas limit for all transactions on special contracts is set to `special_gas_limit` from `ConfigParam 20` (which is 35M at the moment of writing). +Previously only ticktock transactions had this limit, while ordinary transactions had a default limit of `gas_limit` gas (1M). +* Gas usage of special contracts is not taken into account when checking block limits. This allows keeping masterchain block limits low +while having high gas limits for elector. +* Gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` is increased to `special_gas_limit * 2` until 2024-02-29. +See [this post](https://t.me/tonstatus/88) for details. + +### Loading libraries +* Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception. +* Loading a library consumes gas for cell load only once (for the library cell), not twice (both for the library cell and the cell in the library). +* `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed. + +## Version 6 + +### c7 tuple +**c7** tuple extended from 14 to 16 elements: +* **14**: tuple that contains some config parameters as cell slices. If the parameter is absent from the config, the value is null. Asm opcode: `UNPACKEDCONFIGTUPLE`. + * **0**: `StoragePrices` from `ConfigParam 18`. Not the whole dict, but only the one StoragePrices entry (one which corresponds to the current time). + * **1**: `ConfigParam 19` (global id). + * **2**: `ConfigParam 20` (mc gas prices). + * **3**: `ConfigParam 21` (gas prices). + * **4**: `ConfigParam 24` (mc fwd fees). + * **5**: `ConfigParam 25` (fwd fees). + * **6**: `ConfigParam 43` (size limits). +* **15**: "[due payment](https://github.com/ton-blockchain/ton/blob/8a9ff339927b22b72819c5125428b70c406da631/crypto/block/block.tlb#L237)" - current debt for storage fee (nanotons). Asm opcode: `DUEPAYMENT`. + +### New TVM instructions + +#### Fee calculation +* `GETGASFEE` (`gas_used is_mc - price`) - calculates gas fee. +* `GETSTORAGEFEE` (`cells bits seconds is_mc - price`) - calculates storage fees (only current StoragePrices entry is used). +* `GETFORWARDFEE` (`cells bits is_mc - price`) - calculates forward fee. +* `GETPRECOMPILEDGAS` (`- null`) - reserved, currently returns `null`. +* `GETORIGINALFWDFEE` (`fwd_fee is_mc - orig_fwd_fee`) - calculate `fwd_fee * 2^16 / first_frac`. Can be used to get the original `fwd_fee` of the message. +* `GETGASFEESIMPLE` (`gas_used is_mc - price`) - same as `GETGASFEE`, but without flat price (just `(gas_used * price) / 2^16`). +* `GETFORWARDFEESIMPLE` (`cells bits is_mc - price`) - same as `GETFORWARDFEE`, but without lump price (just `(bits*bit_price + cells*cell_price) / 2^16`). 
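
The "simple" fee variants above reduce to one multiplication followed by a ceiling division by 2^16, matching the `td::rshift(..., 16, 1)` calls in the `tonops.cpp` hunks earlier in this diff. The standalone C++ sketch below is illustrative only and not part of the patch: the price constants are made-up placeholders, and it assumes inputs small enough for 64-bit arithmetic, whereas the real opcodes operate on arbitrary-precision integers (`td::RefInt256`) with prices taken from the unpacked config tuple.

```cpp
// Illustrative sketch of the arithmetic behind GETGASFEESIMPLE and
// GETFORWARDFEESIMPLE. Placeholder prices; assumes no 64-bit overflow.
#include <cstdint>
#include <iostream>

// Divide by 2^16 with ceiling rounding, like td::rshift(x, 16, 1) in tonops.cpp.
static std::uint64_t ceil_div_2_16(std::uint64_t x) {
  return (x + 0xffff) >> 16;
}

// GETGASFEESIMPLE: (gas_used * gas_price) / 2^16, rounded up.
static std::uint64_t gas_fee_simple(std::uint64_t gas_used, std::uint64_t gas_price) {
  return ceil_div_2_16(gas_used * gas_price);
}

// GETFORWARDFEESIMPLE: (bits * bit_price + cells * cell_price) / 2^16, rounded up.
static std::uint64_t fwd_fee_simple(std::uint64_t cells, std::uint64_t bits,
                                    std::uint64_t bit_price, std::uint64_t cell_price) {
  return ceil_div_2_16(bits * bit_price + cells * cell_price);
}

int main() {
  const std::uint64_t gas_price = 65536;     // placeholder: 1 nanoton per gas unit
  const std::uint64_t bit_price = 65536;     // placeholder
  const std::uint64_t cell_price = 6553600;  // placeholder
  std::cout << gas_fee_simple(10000, gas_price) << '\n';               // 10000
  std::cout << fwd_fee_simple(3, 2000, bit_price, cell_price) << '\n'; // 2300
  return 0;
}
```
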
+ +`gas_used`, `cells`, `bits`, `time_delta` are integers in range `0..2^63-1`. + +#### Cell operations +Operations for working with Merkle proofs, where cells can have non-zero level and multiple hashes. +* `CLEVEL` (`cell - level`) - returns level of the cell. +* `CLEVELMASK` (`cell - level_mask`) - returns level mask of the cell. +* `i CHASHI` (`cell - hash`) - returns `i`th hash of the cell. +* `i CDEPTHI` (`cell - depth`) - returns `i`th depth of the cell. +* `CHASHIX` (`cell i - hash`) - returns `i`th hash of the cell. +* `CDEPTHIX` (`cell i - depth`) - returns `i`th depth of the cell. + +`i` is in range `0..3`. + +### Other changes +* `GLOBALID` gets `ConfigParam 19` from the tuple, not from the config dict. This decreases gas usage. +* `SENDMSG` gets `ConfigParam 24/25` (message prices) from the tuple, not from the config dict, and also uses `ConfigParam 43` to get `max_msg_cells`. \ No newline at end of file diff --git a/doc/Tests.md b/doc/Tests.md new file mode 100644 index 00000000..c883731a --- /dev/null +++ b/doc/Tests.md @@ -0,0 +1,24 @@ +# Test execution +TON contains multiple unit tests that facilitate the detection of erroneous blockchain behaviour on each commit. +## Build tests +Go inside the build directory and, if you use ninja, build the tests using the following command: + +```ninja test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state``` + +For more details on how to build TON artifacts, please refer to any of the GitHub actions. + +For CMake, use: + +```cmake --build . --target test-ed25519 test-ed25519-crypto test-bigint test-vm test-fift test-cells test-smartcont test-net test-tdactor test-tdutils test-tonlib-offline test-adnl test-dht test-rldp test-rldp2 test-catchain test-fec test-tddb test-db test-validator-session-state``` + +## Run tests +Go inside the build directory and, with ninja, execute: + +```ninja test``` + +With ctest: + +```ctest``` + +## Integration of tests into CI +Most relevant GitHub actions include a ```Run tests``` step that executes the tests. If any of the tests fails, the action will be interrupted and no artifacts will be provided. \ No newline at end of file diff --git a/emulator/CMakeLists.txt b/emulator/CMakeLists.txt index c23f071c..969f9a88 100644 --- a/emulator/CMakeLists.txt +++ b/emulator/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(BUILD_SHARED_LIBS "Use \"OFF\" for a static build."
ON) diff --git a/emulator/emulator-extern.cpp b/emulator/emulator-extern.cpp index 3a439831..f8e2f724 100644 --- a/emulator/emulator-extern.cpp +++ b/emulator/emulator-extern.cpp @@ -103,6 +103,7 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, td::Ref addr_slice; auto account_slice = vm::load_cell_slice(shard_account.account); + bool account_exists = block::gen::t_Account.get_tag(account_slice) == block::gen::Account::account; if (block::gen::t_Account.get_tag(account_slice) == block::gen::Account::account_none) { if (msg_tag == block::gen::CommonMsgInfo::ext_in_msg_info) { block::gen::CommonMsgInfo::Record_ext_in_msg_info info; @@ -120,12 +121,14 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, } else { ERROR_RESPONSE(PSTRING() << "Only ext in and int message are supported"); } - } else { + } else if (block::gen::t_Account.get_tag(account_slice) == block::gen::Account::account) { block::gen::Account::Record_account account_record; if (!tlb::unpack(account_slice, account_record)) { ERROR_RESPONSE(PSTRING() << "Can't unpack account cell"); } addr_slice = std::move(account_record.addr); + } else { + ERROR_RESPONSE(PSTRING() << "Can't parse account cell"); } ton::WorkchainId wc; ton::StdSmcAddress addr; @@ -139,8 +142,16 @@ const char *transaction_emulator_emulate_transaction(void *transaction_emulator, now = (unsigned)std::time(nullptr); } bool is_special = wc == ton::masterchainId && emulator->get_config().is_special_smartcontract(addr); - if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), td::Ref(), now, is_special)) { - ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); + if (account_exists) { + if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), now, is_special)) { + ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); + } + } else { + if (!account.init_new(now)) { + ERROR_RESPONSE(PSTRING() << "Can't init new account"); + } + account.last_trans_lt_ = shard_account.last_trans_lt; + account.last_trans_hash_ = shard_account.last_trans_hash; } auto result = emulator->emulate_transaction(std::move(account), message_cell, now, 0, block::transaction::Transaction::tr_ord); @@ -217,7 +228,7 @@ const char *transaction_emulator_emulate_tick_tock_transaction(void *transaction now = (unsigned)std::time(nullptr); } bool is_special = wc == ton::masterchainId && emulator->get_config().is_special_smartcontract(addr); - if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), td::Ref(), now, is_special)) { + if (!account.unpack(vm::load_cell_slice_ref(shard_account_cell.move_as_ok()), now, is_special)) { ERROR_RESPONSE(PSTRING() << "Can't unpack shard account"); } @@ -503,7 +514,7 @@ const char *tvm_emulator_run_get_method(void *tvm_emulator, int method_id, const auto emulator = static_cast(tvm_emulator); auto result = emulator->run_get_method(method_id, stack); - vm::FakeVmStateLimits fstate(1000); // limit recursive (de)serialization calls + vm::FakeVmStateLimits fstate(3500); // limit recursive (de)serialization calls vm::VmStateInterface::Guard guard(&fstate); vm::CellBuilder stack_cb; diff --git a/emulator/transaction-emulator.cpp b/emulator/transaction-emulator.cpp index 505c0c37..2e8ba037 100644 --- a/emulator/transaction-emulator.cpp +++ b/emulator/transaction-emulator.cpp @@ -1,7 +1,7 @@ #include #include "transaction-emulator.h" #include "crypto/common/refcnt.hpp" -#include "vm/cp0.h" +#include "vm/vm.h" #include "tdutils/td/utils/Time.h" 
using td::Ref; @@ -34,7 +34,7 @@ td::Result> TransactionEmu return fetch_res.move_as_error_prefix("cannot fetch config params "); } - vm::init_op_cp0(debug_enabled_); + TRY_STATUS(vm::init_vm(debug_enabled_)); if (!lt) { lt = lt_; @@ -42,6 +42,7 @@ td::Result> TransactionEmu if (!lt) { lt = (account.last_trans_lt_ / block::ConfigInfo::get_lt_align() + 1) * block::ConfigInfo::get_lt_align(); // next block after account_.last_trans_lt_ } + account.block_lt = lt - lt % block::ConfigInfo::get_lt_align(); compute_phase_cfg.libraries = std::make_unique(libraries_); compute_phase_cfg.ignore_chksig = ignore_chksig_; diff --git a/example/android/README.md b/example/android/README.md index f17ba9d2..cf12ba30 100644 --- a/example/android/README.md +++ b/example/android/README.md @@ -6,27 +6,9 @@ Prerequisite: installed Java and set environment variable JAVA_HOME. ```bash git clone --recursive https://github.com/ton-blockchain/ton.git cd ton -wget https://dl.google.com/android/repository/android-ndk-r25b-linux.zip -unzip android-ndk-r25b-linux.zip -export JAVA_AWT_LIBRARY=NotNeeded -export JAVA_JVM_LIBRARY=NotNeeded -export JAVA_INCLUDE_PATH=${JAVA_HOME}/include -export JAVA_AWT_INCLUDE_PATH=${JAVA_HOME}/include -export JAVA_INCLUDE_PATH2=${JAVA_HOME}/include/linux - -export ANDROID_NDK_ROOT=$(pwd)/android-ndk-r25b -export OPENSSL_DIR=$(pwd)/example/android/third_party/crypto -export SECP256K1_INCLUDE_DIR=$(pwd)/example/android/third_party/secp256k1/include -export SECP256K1_LIBRARY=$(pwd)/example/android/third_party/secp256k1/.libs/libsecp256k1.a -export SODIUM_INCLUDE_DIR=$(pwd)/example/android/third_party/libsodium/libsodium-android-westmere/include -export SODIUM_LIBRARY=$(pwd)/example/android/third_party/libsodium/libsodium-android-westmere/lib/libsodium.a - -rm -rf example/android/src/drinkless/org/ton/TonApi.java -cd example/android/ -cmake -GNinja -DTON_ONLY_TONLIB=ON . -ninja prepare_cross_compiling -rm CMakeCache.txt -./build-all.sh +cp assembly/android/build-android-tonlib.sh . +chmod +x build-android-tonlib.sh +sudo -E ./build-android-tonlib.sh ``` # Generation of Tonlib libraries for iOS in Xcode diff --git a/example/android/build-all.sh b/example/android/build-all.sh index f436e361..6f97dec0 100755 --- a/example/android/build-all.sh +++ b/example/android/build-all.sh @@ -1,4 +1,6 @@ #!/bin/bash +echo ANDROID_NDK_ROOT = $ANDROID_NDK_ROOT + echo Building tonlib for x86... echo ARCH="x86" ./build.sh || exit 1 diff --git a/example/android/build.sh b/example/android/build.sh index 8cecfa41..06217255 100755 --- a/example/android/build.sh +++ b/example/android/build.sh @@ -36,20 +36,28 @@ then BLST_LIBRARY=$(pwd)/third_party/blst/armv8/libblst.a fi - +ORIG_ARCH=$ARCH ARCH=$ABI mkdir -p build-$ARCH cd build-$ARCH -cmake .. -GNinja -DPORTABLE=1 \ --DANDROID_ABI=x86 -DANDROID_PLATFORM=android-32 -DANDROID_NDK=${ANDROID_NDK_ROOT} \ --DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ --DCMAKE_BUILD_TYPE=Release -DANDROID_ABI=${ABI} \ --DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ARCH} -DTON_ARCH="" \ +cmake .. 
-GNinja \ +-DPORTABLE=1 \ -DTON_ONLY_TONLIB=ON \ --DSECP256K1_INCLUDE_DIR=${SECP256K1_INCLUDE_DIR} -DSECP256K1_LIBRARY=${SECP256K1_LIBRARY} \ --DSODIUM_INCLUDE_DIR=${SODIUM_INCLUDE_DIR} -DSODIUM_LIBRARY_RELEASE=${SODIUM_LIBRARY_RELEASE} \ +-DTON_ARCH="" \ +-DANDROID_ABI=x86 \ +-DANDROID_PLATFORM=android-32 \ +-DANDROID_NDK=${ANDROID_NDK_ROOT} \ +-DCMAKE_TOOLCHAIN_FILE=${ANDROID_NDK_ROOT}/build/cmake/android.toolchain.cmake \ +-DCMAKE_BUILD_TYPE=Release \ +-DANDROID_ABI=${ABI} \ +-DOPENSSL_ROOT_DIR=${OPENSSL_DIR}/${ORIG_ARCH} \ +-DSECP256K1_FOUND=1 \ +-DSECP256K1_INCLUDE_DIR=${SECP256K1_INCLUDE_DIR} \ +-DSECP256K1_LIBRARY=${SECP256K1_LIBRARY} \ +-DSODIUM_INCLUDE_DIR=${SODIUM_INCLUDE_DIR} \ +-DSODIUM_LIBRARY_RELEASE=${SODIUM_LIBRARY_RELEASE} \ -DSODIUM_USE_STATIC_LIBS=1 \ -DBLST_LIB=${BLST_LIBRARY} || exit 1 diff --git a/fec/CMakeLists.txt b/fec/CMakeLists.txt index b1ac37b1..2a305607 100644 --- a/fec/CMakeLists.txt +++ b/fec/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/fec/fec.cpp b/fec/fec.cpp index 102df038..f6379fd7 100644 --- a/fec/fec.cpp +++ b/fec/fec.cpp @@ -99,7 +99,7 @@ td::uint32 FecType::symbol_size() const { } td::Result FecType::create(tl_object_ptr obj) { - td::int32 data_size_int, symbol_size_int, symbols_count_int; + td::int32 data_size_int = 0, symbol_size_int = 0, symbols_count_int = 0; ton_api::downcast_call(*obj, td::overloaded([&](const auto &obj) { data_size_int = obj.data_size_; symbol_size_int = obj.symbol_size_; diff --git a/http/CMakeLists.txt b/http/CMakeLists.txt index dbc57ec2..4a3fccf8 100644 --- a/http/CMakeLists.txt +++ b/http/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(HTTP_SOURCE http.h diff --git a/keyring/CMakeLists.txt b/keyring/CMakeLists.txt index 29e48ee9..f8f610f2 100644 --- a/keyring/CMakeLists.txt +++ b/keyring/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(KEYRING_SOURCE keyring.h diff --git a/keys/CMakeLists.txt b/keys/CMakeLists.txt index 486119de..e80436b7 100644 --- a/keys/CMakeLists.txt +++ b/keys/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(KEYS_SOURCE keys.cpp diff --git a/lite-client/CMakeLists.txt b/lite-client/CMakeLists.txt index fd5c812c..5774c050 100644 --- a/lite-client/CMakeLists.txt +++ b/lite-client/CMakeLists.txt @@ -1,6 +1,6 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) -add_library(lite-client-common lite-client-common.cpp lite-client-common.h ext-client.cpp ext-client.h QueryTraits.h) +add_library(lite-client-common STATIC lite-client-common.cpp lite-client-common.h ext-client.cpp ext-client.h QueryTraits.h) target_link_libraries(lite-client-common PUBLIC tdutils tdactor adnllite tl_api tl_lite_api tl-lite-utils ton_crypto ton_block) add_executable(lite-client lite-client.cpp lite-client.h ext-client.h ext-client.cpp) diff --git a/lite-client/lite-client.cpp b/lite-client/lite-client.cpp index c6988ef3..b374d9a7 100644 --- a/lite-client/lite-client.cpp +++ b/lite-client/lite-client.cpp @@ -4430,7 +4430,7 @@ int main(int argc, char* argv[]) { }); #endif - vm::init_op_cp0(true); // enable vm debug + vm::init_vm(true).ensure(); // enable vm debug 
td::actor::Scheduler scheduler({2}); diff --git a/memprof/CMakeLists.txt b/memprof/CMakeLists.txt index 8559c4d9..2ccf11df 100644 --- a/memprof/CMakeLists.txt +++ b/memprof/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(MEMPROF_SOURCE memprof/memprof.cpp diff --git a/overlay/CMakeLists.txt b/overlay/CMakeLists.txt index 7adc0584..ab9722a6 100644 --- a/overlay/CMakeLists.txt +++ b/overlay/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/overlay/overlay-manager.cpp b/overlay/overlay-manager.cpp index f9a79878..17e94aae 100644 --- a/overlay/overlay-manager.cpp +++ b/overlay/overlay-manager.cpp @@ -92,17 +92,17 @@ void OverlayManager::create_public_overlay(adnl::AdnlNodeIdShort local_id, Overl std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) { create_public_overlay_ex(local_id, std::move(overlay_id), std::move(callback), std::move(rules), std::move(scope), - true); + {}); } void OverlayManager::create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, - td::string scope, bool announce_self) { + td::string scope, OverlayOptions opts) { CHECK(!dht_node_.empty()); auto id = overlay_id.compute_short_id(); register_overlay(local_id, id, Overlay::create(keyring_, adnl_, actor_id(this), dht_node_, local_id, std::move(overlay_id), - std::move(callback), std::move(rules), scope, announce_self)); + std::move(callback), std::move(rules), scope, std::move(opts))); } void OverlayManager::create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, diff --git a/overlay/overlay-manager.h b/overlay/overlay-manager.h index 5b59b371..aeca69da 100644 --- a/overlay/overlay-manager.h +++ b/overlay/overlay-manager.h @@ -54,7 +54,7 @@ class OverlayManager : public Overlays { std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope) override; void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, td::string scope, - bool announce_self) override; + OverlayOptions opts) override; void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) override; diff --git a/overlay/overlay.cpp b/overlay/overlay.cpp index ced7efea..efa19d9b 100644 --- a/overlay/overlay.cpp +++ b/overlay/overlay.cpp @@ -38,10 +38,10 @@ td::actor::ActorOwn Overlay::create(td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool announce_self) { + OverlayPrivacyRules rules, td::string scope, OverlayOptions opts) { auto R = td::actor::create_actor("overlay", keyring, adnl, manager, dht_node, local_id, std::move(overlay_id), true, std::vector(), - std::move(callback), std::move(rules), scope, announce_self); + std::move(callback), std::move(rules), scope, opts); return td::actor::ActorOwn(std::move(R)); } @@ -61,7 +61,7 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules 
rules, td::string scope, bool announce_self) + OverlayPrivacyRules rules, td::string scope, OverlayOptions opts) : keyring_(keyring) , adnl_(adnl) , manager_(manager) @@ -72,7 +72,8 @@ OverlayImpl::OverlayImpl(td::actor::ActorId keyring, td::actor , public_(pub) , rules_(std::move(rules)) , scope_(scope) - , announce_self_(announce_self) { + , announce_self_(opts.announce_self_) + , frequent_dht_lookup_(opts.frequent_dht_lookup_) { overlay_id_ = id_full_.compute_short_id(); VLOG(OVERLAY_INFO) << this << ": creating " << (public_ ? "public" : "private"); @@ -275,13 +276,13 @@ void OverlayImpl::alarm() { send_random_peers(P->get_id(), {}); } } - if (next_dht_query_.is_in_past()) { + if (next_dht_query_ && next_dht_query_.is_in_past()) { + next_dht_query_ = td::Timestamp::never(); auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result res) { td::actor::send_closure(SelfId, &OverlayImpl::receive_dht_nodes, std::move(res), true); }); td::actor::send_closure(dht_node_, &dht::Dht::get_value, dht::DhtKey{overlay_id_.pubkey_hash(), "nodes", 0}, std::move(P)); - next_dht_query_ = td::Timestamp::in(td::Random::fast(60.0, 100.0)); } if (update_db_at_.is_in_past()) { if (peers_.size() > 0) { @@ -329,7 +330,13 @@ void OverlayImpl::receive_dht_nodes(td::Result res, bool dummy) { VLOG(OVERLAY_NOTICE) << this << ": can not get value from DHT: " << res.move_as_error(); } + if (!(next_dht_store_query_ && next_dht_store_query_.is_in_past())) { + finish_dht_query(); + return; + } + next_dht_store_query_ = td::Timestamp::never(); if (!announce_self_) { + finish_dht_query(); return; } @@ -337,6 +344,7 @@ void OverlayImpl::receive_dht_nodes(td::Result res, bool dummy) { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), oid = print_id()](td::Result R) { if (R.is_error()) { LOG(ERROR) << oid << "cannot get self node"; + td::actor::send_closure(SelfId, &OverlayImpl::finish_dht_query); return; } td::actor::send_closure(SelfId, &OverlayImpl::update_dht_nodes, R.move_as_ok()); @@ -361,10 +369,11 @@ void OverlayImpl::update_dht_nodes(OverlayNode node) { static_cast(td::Clocks::system() + 3600), td::BufferSlice()}; value.check().ensure(); - auto P = td::PromiseCreator::lambda([oid = print_id()](td::Result res) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), oid = print_id()](td::Result res) { if (res.is_error()) { VLOG(OVERLAY_NOTICE) << oid << ": error storing to DHT: " << res.move_as_error(); } + td::actor::send_closure(SelfId, &OverlayImpl::finish_dht_query); }); td::actor::send_closure(dht_node_, &dht::Dht::set_value, std::move(value), std::move(P)); diff --git a/overlay/overlay.h b/overlay/overlay.h index 5141ae53..1b5264a8 100644 --- a/overlay/overlay.h +++ b/overlay/overlay.h @@ -42,7 +42,7 @@ class Overlay : public td::actor::Actor { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope, bool announce_self = true); + OverlayPrivacyRules rules, td::string scope, OverlayOptions opts = {}); static td::actor::ActorOwn create(td::actor::ActorId keyring, td::actor::ActorId adnl, td::actor::ActorId manager, diff --git a/overlay/overlay.hpp b/overlay/overlay.hpp index a8fd7f30..1137aa8d 100644 --- a/overlay/overlay.hpp +++ b/overlay/overlay.hpp @@ -82,12 +82,17 @@ class OverlayPeer { void on_ping_result(bool success) { if (success) { missed_pings_ = 0; + last_ping_at_ = td::Timestamp::now(); + is_alive_ = true; } else { ++missed_pings_; + if 
(missed_pings_ >= 3 && last_ping_at_.is_in_past(td::Timestamp::in(-15.0))) { + is_alive_ = false; + } } } bool is_alive() const { - return missed_pings_ < 3; + return is_alive_; } td::uint32 throughput_out_bytes = 0; @@ -116,6 +121,8 @@ class OverlayPeer { bool is_neighbour_ = false; size_t missed_pings_ = 0; + bool is_alive_ = true; + td::Timestamp last_ping_at_ = td::Timestamp::now(); }; class OverlayImpl : public Overlay { @@ -124,7 +131,7 @@ class OverlayImpl : public Overlay { td::actor::ActorId manager, td::actor::ActorId dht_node, adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, bool pub, std::vector nodes, std::unique_ptr callback, - OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", bool announce_self = true); + OverlayPrivacyRules rules, td::string scope = "{ \"type\": \"undefined\" }", OverlayOptions opts = {}); void update_dht_node(td::actor::ActorId dht) override { dht_node_ = dht; } @@ -317,6 +324,17 @@ class OverlayImpl : public Overlay { void del_peer(adnl::AdnlNodeIdShort id); OverlayPeer *get_random_peer(bool only_alive = false); + void finish_dht_query() { + if (!next_dht_store_query_) { + next_dht_store_query_ = td::Timestamp::in(td::Random::fast(60.0, 100.0)); + } + if (frequent_dht_lookup_ && peers_.size() == bad_peers_.size()) { + next_dht_query_ = td::Timestamp::in(td::Random::fast(6.0, 10.0)); + } else { + next_dht_query_ = next_dht_store_query_; + } + } + td::actor::ActorId keyring_; td::actor::ActorId adnl_; td::actor::ActorId manager_; @@ -327,6 +345,7 @@ class OverlayImpl : public Overlay { td::DecTree peers_; td::Timestamp next_dht_query_ = td::Timestamp::in(1.0); + td::Timestamp next_dht_store_query_ = td::Timestamp::in(1.0); td::Timestamp update_db_at_; td::Timestamp update_throughput_at_; td::Timestamp last_throughput_update_; @@ -390,6 +409,7 @@ class OverlayImpl : public Overlay { OverlayPrivacyRules rules_; td::string scope_; bool announce_self_ = true; + bool frequent_dht_lookup_ = false; std::map> certs_; class CachedEncryptor : public td::ListNode { diff --git a/overlay/overlays.h b/overlay/overlays.h index ee225c3b..cc93fae3 100644 --- a/overlay/overlays.h +++ b/overlay/overlays.h @@ -158,6 +158,11 @@ class Certificate { td::SharedSlice signature_; }; +struct OverlayOptions { + bool announce_self_ = true; + bool frequent_dht_lookup_ = false; +}; + class Overlays : public td::actor::Actor { public: class Callback { @@ -204,7 +209,7 @@ class Overlays : public td::actor::Actor { td::string scope) = 0; virtual void create_public_overlay_ex(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::unique_ptr callback, OverlayPrivacyRules rules, - td::string scope, bool announce_self) = 0; + td::string scope, OverlayOptions opts) = 0; virtual void create_private_overlay(adnl::AdnlNodeIdShort local_id, OverlayIdFull overlay_id, std::vector nodes, std::unique_ptr callback, OverlayPrivacyRules rules) = 0; diff --git a/recent_changelog.md b/recent_changelog.md index 39ca5c94..852fb764 100644 --- a/recent_changelog.md +++ b/recent_changelog.md @@ -1,8 +1,17 @@ -## 2023.05 Update -1. Archive manager optimization -2. A series of catchain (basic consensus protocol) security improvements -3. Update for Fift libraries and FunC: better error-handling, fixes for `catch` stack recovery -4. A series of out message queue handling optimization (already deployed during emergency upgrades between releases) -5. 
Improvement of binaries portability +## 2024.01 Update -Besides the work of the core team, this update is based on the efforts of @aleksej-paschenko (portability improvement), [Disintar team](https://github.com/disintar/) (archive manager optimization) and [sec3-service](https://github.com/sec3-service) security auditors (funC improvements). +1. Fixes in how gas used by transactions on special accounts is accounted for in the block limit. Previously, gas was counted as usual, so to conduct elections (which cost >30m gas) the masterchain block limit was set to 37m gas. To lower the limit for safety reasons, gas on special accounts is now counted separately. Besides, `gas_max` is set to `special_gas_limit` for all types of transactions on special accounts. The new behavior is activated by setting `version >= 5` in `ConfigParam 8;`. + * Besides, the config update temporarily increases the gas limit on `EQD_v9j1rlsuHHw2FIhcsCFFSD367ldfDdCKcsNmNpIRzUlu` to `special_gas_limit`, see [details](https://t.me/tonstatus/88). +2. Improvements in LS behavior + * Improved detection of the state with all shards applied to decrease the rate of the `Block is not applied` error + * Better error logs: `block not in db` and `block is not applied` separation + * Fix error in proof generation for blocks after merge + * Fix most of the `block is not applied` issues related to sending a too recent block in Proofs + * LS now checks external messages up to `accept_message` (`set_gas`). +3. Improvements in DHT work and storage, CellDb, config.json amendment, peer misbehavior detection, validator session stats collection, emulator. +4. Change in CTOS and XLOAD behavior activated through setting `version >= 5` in `ConfigParam 8;`: + * Loading "nested libraries" (i.e. a library cell that points to another library cell) throws an exception. + * Loading a library consumes gas for cell load only once (for the library cell), not twice (both for the library cell and the cell in the library). + * `XLOAD` now works differently. When it takes a library cell, it returns the cell that it points to. This allows loading "nested libraries", if needed. + +Besides the work of the Core team, this update is based on the efforts of @XaBbl4 (peer misbehavior detection) and @akifoq (CTOS behavior and gas limit scheme for special accounts). diff --git a/rldp-http-proxy/CMakeLists.txt b/rldp-http-proxy/CMakeLists.txt index 92cb01ac..f7e30c80 100644 --- a/rldp-http-proxy/CMakeLists.txt +++ b/rldp-http-proxy/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable(rldp-http-proxy rldp-http-proxy.cpp DNSResolver.h DNSResolver.cpp) target_include_directories(rldp-http-proxy PUBLIC $) diff --git a/rldp-http-proxy/rldp-http-proxy.cpp b/rldp-http-proxy/rldp-http-proxy.cpp index e9186822..0d518d6d 100644 --- a/rldp-http-proxy/rldp-http-proxy.cpp +++ b/rldp-http-proxy/rldp-http-proxy.cpp @@ -54,6 +54,7 @@ #include "git.h" #include "td/utils/BufferedFd.h" #include "common/delay.h" +#include "td/utils/port/path.h" #include "tonlib/tonlib/TonlibClientWrapper.h" #include "DNSResolver.h" @@ -920,6 +921,12 @@ class RldpHttpProxy : public td::actor::Actor { } void run() { + if (!db_root_.empty()) { + td::mkpath(db_root_ + "/").ensure(); + } else if (!is_client_) { + LOG(ERROR) << "DB root is required for server proxy"; + std::_Exit(2); + } keyring_ = ton::keyring::Keyring::create(is_client_ ?
std::string("") : (db_root_ + "/keyring")); { auto S = load_global_config(); @@ -955,9 +962,16 @@ class RldpHttpProxy : public td::actor::Actor { auto conf_dataR = td::read_file(global_config_); conf_dataR.ensure(); + ton::tl_object_ptr key_store; + if (db_root_.empty()) { + key_store = tonlib_api::make_object(); + } else { + td::mkpath(db_root_ + "/tonlib-cache/").ensure(); + key_store = tonlib_api::make_object(db_root_ + "/tonlib-cache/"); + } auto tonlib_options = tonlib_api::make_object( tonlib_api::make_object(conf_dataR.move_as_ok().as_slice().str(), "", false, false), - tonlib_api::make_object()); + std::move(key_store)); tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); dns_resolver_ = td::actor::create_actor("dnsresolver", tonlib_client_.get()); } diff --git a/rldp/CMakeLists.txt b/rldp/CMakeLists.txt index 813d346d..39e0d3ca 100644 --- a/rldp/CMakeLists.txt +++ b/rldp/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/rldp2/CMakeLists.txt b/rldp2/CMakeLists.txt index 1bfeb0bb..c144ec01 100644 --- a/rldp2/CMakeLists.txt +++ b/rldp2/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/storage/CMakeLists.txt b/storage/CMakeLists.txt index a5f36ff2..0208a33d 100644 --- a/storage/CMakeLists.txt +++ b/storage/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/storage/PeerActor.cpp b/storage/PeerActor.cpp index 0cb21c0a..48d45626 100644 --- a/storage/PeerActor.cpp +++ b/storage/PeerActor.cpp @@ -25,6 +25,7 @@ #include "td/utils/overloaded.h" #include "td/utils/Random.h" #include "vm/boc.h" +#include "common/delay.h" namespace ton { @@ -119,9 +120,9 @@ void PeerActor::on_get_piece_result(PartId piece_id, td::Result return std::move(res); }(); if (res.is_error()) { - LOG(DEBUG) << "getPiece " << piece_id << "query: " << res.error(); + LOG(DEBUG) << "getPiece " << piece_id << " query: " << res.error(); } else { - LOG(DEBUG) << "getPiece " << piece_id << "query: OK"; + LOG(DEBUG) << "getPiece " << piece_id << " query: OK"; } state_->node_queries_results_.add_element(std::make_pair(piece_id, std::move(res))); notify_node(); @@ -343,11 +344,20 @@ void PeerActor::loop_node_get_piece() { } auto piece_size = std::min(torrent_info_->piece_size, torrent_info_->file_size - part * torrent_info_->piece_size); - td::actor::send_closure(state_->speed_limiters_.download, &SpeedLimiter::enqueue, (double)piece_size, - td::Timestamp::in(3.0), [part, SelfId = actor_id(this)](td::Result R) { - td::actor::send_closure(SelfId, &PeerActor::node_get_piece_query_ready, part, - std::move(R)); - }); + td::Timestamp timeout = td::Timestamp::in(3.0); + td::actor::send_closure( + state_->speed_limiters_.download, &SpeedLimiter::enqueue, (double)piece_size, timeout, + [=, SelfId = actor_id(this)](td::Result R) { + if (R.is_ok()) { + td::actor::send_closure(SelfId, &PeerActor::node_get_piece_query_ready, part, std::move(R)); + } else { + delay_action( + [=, R = std::move(R)]() mutable { + td::actor::send_closure(SelfId, &PeerActor::node_get_piece_query_ready, part, std::move(R)); + }, + timeout); + } + }); } } diff --git a/storage/PeerManager.h 
b/storage/PeerManager.h index 52297ac5..38d1494b 100644 --- a/storage/PeerManager.h +++ b/storage/PeerManager.h @@ -143,9 +143,11 @@ class PeerManager : public td::actor::Actor { td::actor::ActorId peer_manager_; ton::adnl::AdnlNodeIdShort dst_; }; + ton::overlay::OverlayOptions opts; + opts.announce_self_ = !client_mode_; + opts.frequent_dht_lookup_ = true; send_closure(overlays_, &ton::overlay::Overlays::create_public_overlay_ex, src_id, overlay_id_.clone(), - std::make_unique(actor_id(this), src_id), rules, R"({ "type": "storage" })", - !client_mode_); + std::make_unique(actor_id(this), src_id), rules, R"({ "type": "storage" })", opts); } promise.set_value({}); } diff --git a/storage/SpeedLimiter.cpp b/storage/SpeedLimiter.cpp index 952005fe..704c7402 100644 --- a/storage/SpeedLimiter.cpp +++ b/storage/SpeedLimiter.cpp @@ -16,6 +16,7 @@ */ #include "SpeedLimiter.h" +#include "common/errorcode.h" namespace ton { @@ -41,11 +42,11 @@ void SpeedLimiter::enqueue(double size, td::Timestamp timeout, td::Promise( tonlib_api::make_object(r_conf_data.move_as_ok().as_slice().str(), "", false, false), - tonlib_api::make_object()); + tonlib_api::make_object(key_store)); tonlib_client_ = td::actor::create_actor("tonlibclient", std::move(tonlib_options)); } diff --git a/storage/test/storage.cpp b/storage/test/storage.cpp index e7a97352..ff5a4831 100644 --- a/storage/test/storage.cpp +++ b/storage/test/storage.cpp @@ -400,7 +400,6 @@ class NetChannel : public td::actor::Actor { break; } else if (l > alive_end - eps) { alive_begin += alive_step + sleep_step; - alive_end = alive_begin + alive_step; } else { double new_l = td::min(alive_end, r); res += (new_l - l) * speed; @@ -516,9 +515,7 @@ class NetChannel : public td::actor::Actor { queue_ = {}; } - bool ok = false; while (!queue_.empty() && (double)queue_.front().size < got_) { - ok = true; auto query = queue_.pop(); got_ -= (double)query.size; total_size_ -= (double)query.size; diff --git a/tdactor/CMakeLists.txt b/tdactor/CMakeLists.txt index 3490eb17..46dd0335 100644 --- a/tdactor/CMakeLists.txt +++ b/tdactor/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #SOURCE SETS set(TDACTOR_SOURCE diff --git a/tdactor/benchmark/CMakeLists.txt b/tdactor/benchmark/CMakeLists.txt index e01d33dc..c4ff79a1 100644 --- a/tdactor/benchmark/CMakeLists.txt +++ b/tdactor/benchmark/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(BENCHMARK_SOURCE benchmark.cpp diff --git a/tdactor/test/actors_core.cpp b/tdactor/test/actors_core.cpp index ae10eb9b..96cd6239 100644 --- a/tdactor/test/actors_core.cpp +++ b/tdactor/test/actors_core.cpp @@ -720,7 +720,7 @@ TEST(Actor2, actor_function_result) { } TEST(Actor2, actor_ping_pong) { - Scheduler scheduler{{3}, Scheduler::Paused}; + Scheduler scheduler{{3}, false, Scheduler::Paused}; sb.clear(); scheduler.start(); @@ -799,7 +799,7 @@ TEST(Actor2, Schedulers) { for (auto run_count : {0, 1, 2}) { for (auto stop_count : {0, 1, 2}) { for (size_t threads : {0, 1}) { - Scheduler scheduler({threads}, mode); + Scheduler scheduler({threads}, false, mode); for (int i = 0; i < start_count; i++) { scheduler.start(); } diff --git a/tdactor/test/actors_promise.cpp b/tdactor/test/actors_promise.cpp index f1d57069..5717b394 100644 --- a/tdactor/test/actors_promise.cpp +++ b/tdactor/test/actors_promise.cpp @@ -210,7 +210,7 @@ TEST(Actor, promise_future) { TEST(Actor2, 
actor_lost_promise) { using namespace td::actor; using namespace td; - Scheduler scheduler({1}, Scheduler::Paused); + Scheduler scheduler({1}, false, Scheduler::Paused); auto watcher = td::create_shared_destructor([] { LOG(ERROR) << "STOP"; diff --git a/tddb/CMakeLists.txt b/tddb/CMakeLists.txt index 1acd5420..89730b95 100644 --- a/tddb/CMakeLists.txt +++ b/tddb/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #SOURCE SETS set(TDDB_UTILS_SOURCE diff --git a/tddb/td/db/utils/BlobView.cpp b/tddb/td/db/utils/BlobView.cpp index 7011a00e..ebfbc6d5 100644 --- a/tddb/td/db/utils/BlobView.cpp +++ b/tddb/td/db/utils/BlobView.cpp @@ -311,6 +311,7 @@ td::Result FileMemoryMappingBlobView::create(td::CSlice file_path, td: class CyclicBlobViewImpl : public BlobViewImpl { public: CyclicBlobViewImpl(td::BufferSlice data, td::uint64 total_size) : data_(std::move(data)), total_size_(total_size) { + CHECK(!data_.empty()); } td::Result view_impl(td::MutableSlice slice, td::uint64 offset) override { auto res = slice; diff --git a/tdfec/CMakeLists.txt b/tdfec/CMakeLists.txt index adfe2fdb..828ff90d 100644 --- a/tdfec/CMakeLists.txt +++ b/tdfec/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(TDFEC_SOURCE td/fec/raptorq/Rfc.cpp diff --git a/tdfec/benchmark/CMakeLists.txt b/tdfec/benchmark/CMakeLists.txt index 93ec575d..ee8f72cb 100644 --- a/tdfec/benchmark/CMakeLists.txt +++ b/tdfec/benchmark/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable(benchmark-fec benchmark.cpp ) target_include_directories(benchmark-fec PUBLIC $) diff --git a/tdfec/td/fec/raptorq/Rfc.h b/tdfec/td/fec/raptorq/Rfc.h index 1f5c27f0..e3a33131 100644 --- a/tdfec/td/fec/raptorq/Rfc.h +++ b/tdfec/td/fec/raptorq/Rfc.h @@ -61,7 +61,7 @@ class Rfc { template void encoding_row_for_each(EncodingRow t, F &&f) const { f(t.b); - for (uint16 j = 1; j < t.d; ++j) { + for (uint32 j = 1; j < t.d; ++j) { t.b = (t.b + t.a) % W; f(t.b); } diff --git a/tdnet/CMakeLists.txt b/tdnet/CMakeLists.txt index d5ae7086..bc00a676 100644 --- a/tdnet/CMakeLists.txt +++ b/tdnet/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(TDNET_SOURCE td/net/FdListener.cpp diff --git a/tdnet/td/net/TcpListener.cpp b/tdnet/td/net/TcpListener.cpp index 7b7364ba..e711cbbd 100644 --- a/tdnet/td/net/TcpListener.cpp +++ b/tdnet/td/net/TcpListener.cpp @@ -46,9 +46,11 @@ void TcpListener::start_up() { } void TcpListener::tear_down() { - // unsubscribe from socket updates - // nb: interface will be changed - td::actor::SchedulerContext::get()->get_poll().unsubscribe(server_socket_fd_.get_poll_info().get_pollable_fd_ref()); + if (!server_socket_fd_.empty()) { + // unsubscribe from socket updates + // nb: interface will be changed + td::actor::SchedulerContext::get()->get_poll().unsubscribe(server_socket_fd_.get_poll_info().get_pollable_fd_ref()); + } } void TcpListener::loop() { diff --git a/tdnet/test/net-test.cpp b/tdnet/test/net-test.cpp index bb084a67..d20be504 100644 --- a/tdnet/test/net-test.cpp +++ b/tdnet/test/net-test.cpp @@ -158,9 +158,11 @@ void run_server(int from_port, int to_port, bool is_first, bool use_tcp) { TEST(Net, PingPong) { SET_VERBOSITY_LEVEL(VERBOSITY_NAME(ERROR)); + int port1 = td::Random::fast(10000, 10999); + int port2 = 
td::Random::fast(11000, 11999); for (auto use_tcp : {false, true}) { - auto a = td::thread([use_tcp] { run_server(8091, 8092, true, use_tcp); }); - auto b = td::thread([use_tcp] { run_server(8092, 8091, false, use_tcp); }); + auto a = td::thread([=] { run_server(port1, port2, true, use_tcp); }); + auto b = td::thread([=] { run_server(port2, port1, false, use_tcp); }); a.join(); b.join(); } diff --git a/tdtl/CMakeLists.txt b/tdtl/CMakeLists.txt index b0f83cd9..482bd0f7 100644 --- a/tdtl/CMakeLists.txt +++ b/tdtl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) #SOURCE SETS set(TDTL_SOURCE diff --git a/tdutils/CMakeLists.txt b/tdutils/CMakeLists.txt index 7b577e4f..f1e4b1ea 100644 --- a/tdutils/CMakeLists.txt +++ b/tdutils/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(TDUTILS_MIME_TYPE "Generate mime types conversion (gperf is required)" ON) @@ -314,7 +314,7 @@ if (WIN32) # find_library(WS2_32_LIBRARY ws2_32) # find_library(MSWSOCK_LIBRARY Mswsock) # target_link_libraries(tdutils PRIVATE ${WS2_32_LIBRARY} ${MSWSOCK_LIBRARY}) - target_link_libraries(tdutils PRIVATE ws2_32 Mswsock Normaliz psapi) + target_link_libraries(tdutils PRIVATE ws2_32 Mswsock Normaliz psapi DbgHelp) endif() if (NOT CMAKE_CROSSCOMPILING AND TDUTILS_MIME_TYPE) add_dependencies(tdutils tdmime_auto) diff --git a/tdutils/generate/CMakeLists.txt b/tdutils/generate/CMakeLists.txt index 07353e51..194fda39 100644 --- a/tdutils/generate/CMakeLists.txt +++ b/tdutils/generate/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) # Generates files for MIME type <-> extension conversions # DEPENDS ON: gperf grep bash/powershell diff --git a/tdutils/td/utils/BigNum.cpp b/tdutils/td/utils/BigNum.cpp index 36dde064..9de11fca 100644 --- a/tdutils/td/utils/BigNum.cpp +++ b/tdutils/td/utils/BigNum.cpp @@ -159,7 +159,11 @@ bool BigNum::is_bit_set(int num) const { } bool BigNum::is_prime(BigNumContext &context) const { +#if OPENSSL_VERSION_MAJOR >= 3 + int result = BN_check_prime(impl_->big_num, context.impl_->big_num_context, nullptr); +#else int result = BN_is_prime_ex(impl_->big_num, BN_prime_checks, context.impl_->big_num_context, nullptr); +#endif LOG_IF(FATAL, result == -1); return result == 1; } diff --git a/tdutils/td/utils/BufferedUdp.h b/tdutils/td/utils/BufferedUdp.h index bf4aa1b8..3fa93e9d 100644 --- a/tdutils/td/utils/BufferedUdp.h +++ b/tdutils/td/utils/BufferedUdp.h @@ -106,6 +106,7 @@ class UdpReader { } if (status.is_error() && !UdpSocketFd::is_critical_read_error(status)) { queue.push(UdpMessage{{}, {}, std::move(status)}); + return td::Status::OK(); } return status; } diff --git a/tdutils/td/utils/Time.h b/tdutils/td/utils/Time.h index 5151b818..ece822d4 100644 --- a/tdutils/td/utils/Time.h +++ b/tdutils/td/utils/Time.h @@ -110,6 +110,7 @@ class Timestamp { } friend bool operator==(Timestamp a, Timestamp b); + friend Timestamp &operator+=(Timestamp &a, double b); private: double at_{0}; @@ -122,6 +123,11 @@ inline bool operator<(const Timestamp &a, const Timestamp &b) { return a.at() < b.at(); } +inline Timestamp &operator+=(Timestamp &a, double b) { + a.at_ += b; + return a; +} + template void store(const Timestamp ×tamp, StorerT &storer) { storer.store_binary(timestamp.at() - Time::now() + Clocks::system()); diff --git a/tdutils/td/utils/as.h b/tdutils/td/utils/as.h index 
c60c74e2..6015af29 100644 --- a/tdutils/td/utils/as.h +++ b/tdutils/td/utils/as.h @@ -76,12 +76,7 @@ class ConstAs { } // namespace detail -// no std::is_trivially_copyable in libstdc++ before 5.0 -#if __GLIBCXX__ -#define TD_IS_TRIVIALLY_COPYABLE(T) __has_trivial_copy(T) -#else #define TD_IS_TRIVIALLY_COPYABLE(T) std::is_trivially_copyable::value -#endif template = 0> diff --git a/tdutils/td/utils/crypto.cpp b/tdutils/td/utils/crypto.cpp index 27313cf3..ea1efbe7 100644 --- a/tdutils/td/utils/crypto.cpp +++ b/tdutils/td/utils/crypto.cpp @@ -25,7 +25,6 @@ #include "td/utils/logging.h" #include "td/utils/misc.h" #include "td/utils/port/RwMutex.h" -#include "td/utils/port/thread_local.h" #include "td/utils/Random.h" #include "td/utils/ScopeGuard.h" #include "td/utils/SharedSlice.h" @@ -598,16 +597,23 @@ void aes_ige_decrypt(Slice aes_key, MutableSlice aes_iv, Slice from, MutableSlic static void aes_cbc_xcrypt(Slice aes_key, MutableSlice aes_iv, Slice from, MutableSlice to, bool encrypt_flag) { CHECK(aes_key.size() == 32); CHECK(aes_iv.size() == 16); - AES_KEY key; - int err; - if (encrypt_flag) { - err = AES_set_encrypt_key(aes_key.ubegin(), 256, &key); - } else { - err = AES_set_decrypt_key(aes_key.ubegin(), 256, &key); - } - LOG_IF(FATAL, err != 0); CHECK(from.size() <= to.size()); - AES_cbc_encrypt(from.ubegin(), to.ubegin(), from.size(), &key, aes_iv.ubegin(), encrypt_flag); + CHECK(from.size() % 16 == 0); + int out_len = 0; + EVP_CIPHER_CTX *ctx = EVP_CIPHER_CTX_new(); + CHECK(ctx); + if (encrypt_flag) { + CHECK(EVP_EncryptInit_ex(ctx, EVP_aes_256_cbc(), nullptr, aes_key.ubegin(), aes_iv.ubegin()) == 1); + CHECK(EVP_CIPHER_CTX_set_padding(ctx, 0) == 1); + CHECK(EVP_EncryptUpdate(ctx, to.ubegin(), &out_len, from.ubegin(), td::narrow_cast(from.size())) == 1); + CHECK(EVP_EncryptFinal_ex(ctx, to.ubegin() + out_len, &out_len) == 1); + } else { + CHECK(EVP_DecryptInit_ex(ctx, EVP_aes_256_cbc(), nullptr, aes_key.ubegin(), aes_iv.ubegin()) == 1); + CHECK(EVP_CIPHER_CTX_set_padding(ctx, 0) == 1); + CHECK(EVP_DecryptUpdate(ctx, to.ubegin(), &out_len, from.ubegin(), td::narrow_cast(from.size())) == 1); + CHECK(EVP_DecryptFinal_ex(ctx, to.ubegin() + out_len, &out_len) == 1); + } + EVP_CIPHER_CTX_free(ctx); } void aes_cbc_encrypt(Slice aes_key, MutableSlice aes_iv, Slice from, MutableSlice to) { @@ -723,7 +729,18 @@ string sha512(Slice data) { class Sha256State::Impl { public: - SHA256_CTX ctx_; + EVP_MD_CTX *ctx_ = nullptr; + + Impl() { + ctx_ = EVP_MD_CTX_new(); + CHECK(ctx_); + } + + ~Impl() { + if (ctx_) { + EVP_MD_CTX_free(ctx_); + } + } }; Sha256State::Sha256State() = default; @@ -755,24 +772,23 @@ void Sha256State::init() { impl_ = make_unique(); } CHECK(!is_inited_); - int err = SHA256_Init(&impl_->ctx_); - LOG_IF(FATAL, err != 1); + CHECK(EVP_DigestInit_ex(impl_->ctx_, EVP_sha256(), nullptr) == 1); is_inited_ = true; } void Sha256State::feed(Slice data) { CHECK(impl_); CHECK(is_inited_); - int err = SHA256_Update(&impl_->ctx_, data.ubegin(), data.size()); - LOG_IF(FATAL, err != 1); + CHECK(EVP_DigestUpdate(impl_->ctx_, data.ubegin(), data.size()) == 1); } void Sha256State::extract(MutableSlice output, bool destroy) { CHECK(output.size() >= 32); CHECK(impl_); CHECK(is_inited_); - int err = SHA256_Final(output.ubegin(), &impl_->ctx_); - LOG_IF(FATAL, err != 1); + unsigned size; + CHECK(EVP_DigestFinal_ex(impl_->ctx_, output.ubegin(), &size) == 1); + CHECK(size == 32); is_inited_ = false; if (destroy) { impl_.reset(); diff --git a/tdutils/td/utils/crypto.h b/tdutils/td/utils/crypto.h index 
592a8a00..4494ef48 100644 --- a/tdutils/td/utils/crypto.h +++ b/tdutils/td/utils/crypto.h @@ -151,7 +151,7 @@ class Sha256State { bool is_inited_ = false; }; -void md5(Slice input, MutableSlice output); +[[deprecated("MD5 is not cryptographically secure")]] void md5(Slice input, MutableSlice output); void pbkdf2_sha256(Slice password, Slice salt, int iteration_count, MutableSlice dest); void pbkdf2_sha512(Slice password, Slice salt, int iteration_count, MutableSlice dest); diff --git a/tdutils/td/utils/port/stacktrace.cpp b/tdutils/td/utils/port/stacktrace.cpp index 2c025d2e..e89daec6 100644 --- a/tdutils/td/utils/port/stacktrace.cpp +++ b/tdutils/td/utils/port/stacktrace.cpp @@ -20,9 +20,13 @@ #include "td/utils/port/signals.h" -#if __GLIBC__ +#if TD_WINDOWS +#include +#else +#if TD_DARWIN || __GLIBC__ #include #endif +#endif #if TD_LINUX || TD_FREEBSD #include @@ -39,13 +43,48 @@ namespace td { namespace { void print_backtrace(void) { -#if __GLIBC__ +#if TD_WINDOWS + void *stack[100]; + HANDLE process = GetCurrentProcess(); + SymInitialize(process, nullptr, 1); + unsigned frames = CaptureStackBackTrace(0, 100, stack, nullptr); + signal_safe_write("------- Stack Backtrace -------\n", false); + for (unsigned i = 0; i < frames; i++) { + td::uint8 symbol_buf[sizeof(SYMBOL_INFO) + 256]; + auto symbol = (SYMBOL_INFO *)symbol_buf; + memset(symbol_buf, 0, sizeof(symbol_buf)); + symbol->MaxNameLen = 255; + symbol->SizeOfStruct = sizeof(SYMBOL_INFO); + SymFromAddr(process, (DWORD64)(stack[i]), nullptr, symbol); + // Don't use sprintf here because it is not signal-safe + char buf[256 + 32]; + char* buf_ptr = buf; + if (frames - i - 1 < 10) { + strcpy(buf_ptr, " "); + buf_ptr += strlen(buf_ptr); + } + _itoa(frames - i - 1, buf_ptr, 10); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, ": ["); + buf_ptr += strlen(buf_ptr); + _ui64toa(td::uint64(symbol->Address), buf_ptr, 16); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, "] "); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, symbol->Name); + buf_ptr += strlen(buf_ptr); + strcpy(buf_ptr, "\n"); + signal_safe_write(td::Slice{buf, strlen(buf)}, false); + } +#else +#if TD_DARWIN || __GLIBC__ void *buffer[128]; int nptrs = backtrace(buffer, 128); signal_safe_write("------- Stack Backtrace -------\n", false); backtrace_symbols_fd(buffer, nptrs, 2); signal_safe_write("-------------------------------\n", false); #endif +#endif } void print_backtrace_gdb(void) { @@ -129,7 +168,7 @@ void Stacktrace::print_to_stderr(const PrintOptions &options) { } void Stacktrace::init() { -#if __GLIBC__ +#if TD_DARWIN || __GLIBC__ // backtrace needs to be called once to ensure that next calls are async-signal-safe void *buffer[1]; backtrace(buffer, 1); diff --git a/tdutils/test/MpmcWaiter.cpp b/tdutils/test/MpmcWaiter.cpp index 9cb5b363..d0a9fc84 100644 --- a/tdutils/test/MpmcWaiter.cpp +++ b/tdutils/test/MpmcWaiter.cpp @@ -75,9 +75,9 @@ void test_waiter_stress_one_one() { TEST(MpmcEagerWaiter, stress_one_one) { test_waiter_stress_one_one(); } -TEST(MpmcSleepyWaiter, stress_one_one) { - test_waiter_stress_one_one(); -} +// TEST(MpmcSleepyWaiter, stress_one_one) { +// test_waiter_stress_one_one(); +// } template void test_waiter_stress() { diff --git a/terminal/CMakeLists.txt b/terminal/CMakeLists.txt index ae8c70bd..af51153f 100644 --- a/terminal/CMakeLists.txt +++ b/terminal/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git 
a/test/ed25519_crypto.cpp b/test/ed25519_crypto.cpp deleted file mode 100644 index 371b7247..00000000 --- a/test/ed25519_crypto.cpp +++ /dev/null @@ -1,2053 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include -#include -#include -#include -#include - -// ****************************************************** - -namespace openssl { -#include -} - -namespace arith { -struct dec_string { - std::string str; - explicit dec_string(const std::string& s) : str(s) { - } -}; - -struct hex_string { - std::string str; - explicit hex_string(const std::string& s) : str(s) { - } -}; -} // namespace arith - -namespace arith { - -using namespace openssl; - -inline void bn_assert(int cond); -BN_CTX* get_ctx(); - -class BignumBitref { - BIGNUM* ptr; - int n; - - public: - BignumBitref(BIGNUM& x, int _n) : ptr(&x), n(_n){}; - operator bool() const { - return BN_is_bit_set(ptr, n); - } - BignumBitref& operator=(bool val); -}; - -class Bignum { - BIGNUM val; - - public: - class bignum_error {}; - Bignum() { - BN_init(&val); - } - Bignum(long x) { - BN_init(&val); - set_long(x); - } - ~Bignum() { - BN_free(&val); - } - Bignum(const dec_string& ds) { - BN_init(&val); - set_dec_str(ds.str); - } - Bignum(const hex_string& hs) { - BN_init(&val); - set_hex_str(hs.str); - } - Bignum(const Bignum& x) { - BN_init(&val); - BN_copy(&val, &x.val); - } - //Bignum (Bignum&& x) { val = x.val; } - void clear() { - BN_clear(&val); - } // use this for sensitive data - Bignum& operator=(const Bignum& x) { - BN_copy(&val, &x.val); - return *this; - } - Bignum& operator=(Bignum&& x) { - swap(x); - return *this; - } - Bignum& operator=(long x) { - return set_long(x); - } - Bignum& operator=(const dec_string& ds) { - return set_dec_str(ds.str); - } - Bignum& operator=(const hex_string& hs) { - return set_hex_str(hs.str); - } - Bignum& swap(Bignum& x) { - BN_swap(&val, &x.val); - return *this; - } - BIGNUM* bn_ptr() { - return &val; - } - const BIGNUM* bn_ptr() const { - return &val; - } - bool is_zero() const { - return BN_is_zero(&val); - } - int sign() const { - return BN_is_zero(&val) ? 0 : (BN_is_negative(&val) ? 
-1 : 1); - } - bool odd() const { - return BN_is_odd(&val); - } - int num_bits() const { - return BN_num_bits(&val); - } - int num_bytes() const { - return BN_num_bytes(&val); - } - bool operator[](int n) const { - return BN_is_bit_set(&val, n); - } - BignumBitref operator[](int n) { - return BignumBitref(val, n); - } - void export_msb(unsigned char* buffer, std::size_t size) const; - Bignum& import_msb(const unsigned char* buffer, std::size_t size); - Bignum& import_msb(const std::string& s) { - return import_msb((const unsigned char*)s.c_str(), s.size()); - } - void export_lsb(unsigned char* buffer, std::size_t size) const; - Bignum& import_lsb(const unsigned char* buffer, std::size_t size); - Bignum& import_lsb(const std::string& s) { - return import_lsb((const unsigned char*)s.c_str(), s.size()); - } - - Bignum& set_dec_str(std::string s) { - BIGNUM* tmp = &val; - bn_assert(BN_dec2bn(&tmp, s.c_str())); - return *this; - } - - Bignum& set_hex_str(std::string s) { - BIGNUM* tmp = &val; - bn_assert(BN_hex2bn(&tmp, s.c_str())); - return *this; - } - - Bignum& set_ulong(unsigned long x) { - bn_assert(BN_set_word(&val, x)); - return *this; - } - - Bignum& set_long(long x) { - set_ulong(std::abs(x)); - return x < 0 ? negate() : *this; - } - - Bignum& negate() { - BN_set_negative(&val, !BN_is_negative(&val)); - return *this; - } - - Bignum& operator+=(const Bignum& y) { - bn_assert(BN_add(&val, &val, &y.val)); - return *this; - } - - Bignum& operator+=(long y) { - bn_assert((y >= 0 ? BN_add_word : BN_sub_word)(&val, std::abs(y))); - return *this; - } - - Bignum& operator-=(long y) { - bn_assert((y >= 0 ? BN_sub_word : BN_add_word)(&val, std::abs(y))); - return *this; - } - - Bignum& operator*=(const Bignum& y) { - bn_assert(BN_mul(&val, &val, &y.val, get_ctx())); - return *this; - } - - Bignum& operator*=(long y) { - if (y < 0) { - negate(); - } - bn_assert(BN_mul_word(&val, std::abs(y))); - return *this; - } - - Bignum& operator<<=(int r) { - bn_assert(BN_lshift(&val, &val, r)); - return *this; - } - - Bignum& operator>>=(int r) { - bn_assert(BN_rshift(&val, &val, r)); - return *this; - } - - Bignum& operator/=(const Bignum& y) { - Bignum w; - bn_assert(BN_div(&val, &w.val, &val, &y.val, get_ctx())); - return *this; - } - - Bignum& operator/=(long y) { - bn_assert(BN_div_word(&val, std::abs(y)) != (BN_ULONG)(-1)); - return y < 0 ? negate() : *this; - } - - Bignum& operator%=(const Bignum& y) { - bn_assert(BN_mod(&val, &val, &y.val, get_ctx())); - return *this; - } - - Bignum& operator%=(long y) { - BN_ULONG rem = BN_mod_word(&val, std::abs(y)); - bn_assert(rem != (BN_ULONG)(-1)); - return set_long(y < 0 ? 
-rem : rem); - } - - unsigned long divmod(unsigned long y) { - BN_ULONG rem = BN_div_word(&val, y); - bn_assert(rem != (BN_ULONG)(-1)); - return rem; - } - - const Bignum divmod(const Bignum& y); - - std::string to_str() const; - std::string to_hex() const; -}; - -inline void bn_assert(int cond) { - if (!cond) { - throw Bignum::bignum_error(); - } -} - -BN_CTX* get_ctx(void) { - static BN_CTX* ctx = BN_CTX_new(); - return ctx; -} - -BignumBitref& BignumBitref::operator=(bool val) { - if (val) { - BN_set_bit(ptr, n); - } else { - BN_clear_bit(ptr, n); - } - return *this; -} - -const Bignum operator+(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_add(z.bn_ptr(), x.bn_ptr(), y.bn_ptr())); - return z; -} - -const Bignum operator+(const Bignum& x, long y) { - if (y > 0) { - Bignum z(x); - bn_assert(BN_add_word(z.bn_ptr(), y)); - return z; - } else if (y < 0) { - Bignum z(x); - bn_assert(BN_sub_word(z.bn_ptr(), -y)); - return z; - } else { - return x; - } -} - -/* - const Bignum operator+ (Bignum&& x, long y) { - if (y > 0) { - bn_assert (BN_add_word (x.bn_ptr(), y)); - } else if (y < 0) { - bn_assert (BN_sub_word (x.bn_ptr(), -y)); - } - return std::move (x); - } - */ - -const Bignum operator+(long y, const Bignum& x) { - return x + y; -} - -/* - const Bignum operator+ (long y, Bignum&& x) { - return x + y; - } - */ - -const Bignum operator-(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_sub(z.bn_ptr(), x.bn_ptr(), y.bn_ptr())); - return z; -} - -const Bignum operator-(const Bignum& x, long y) { - return x + (-y); -} - -/* - const Bignum operator- (Bignum&& x, long y) { - return x + (-y); - } - */ - -const Bignum operator*(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_mul(z.bn_ptr(), x.bn_ptr(), y.bn_ptr(), get_ctx())); - return z; -} - -const Bignum operator*(const Bignum& x, long y) { - if (y > 0) { - Bignum z(x); - bn_assert(BN_mul_word(z.bn_ptr(), y)); - return z; - } else if (y < 0) { - Bignum z(x); - z.negate(); - bn_assert(BN_mul_word(z.bn_ptr(), -y)); - return z; - } else { - Bignum z(0); - return z; - } -} - -/* - const Bignum operator* (Bignum&& x, long y) { - if (y > 0) { - bn_assert (BN_mul_word (x.bn_ptr(), y)); - } else if (y < 0) { - x.negate(); - bn_assert (BN_mul_word (x.bn_ptr(), -y)); - } else { - x = 0; - } - return std::move (x); - } - */ - -const Bignum operator*(long y, const Bignum& x) { - return x * y; -} - -const Bignum operator/(const Bignum& x, const Bignum& y) { - Bignum z, w; - bn_assert(BN_div(z.bn_ptr(), w.bn_ptr(), x.bn_ptr(), y.bn_ptr(), get_ctx())); - return z; -} - -const Bignum Bignum::divmod(const Bignum& y) { - Bignum w; - bn_assert(BN_div(&val, w.bn_ptr(), &val, y.bn_ptr(), get_ctx())); - return w; -} - -const Bignum operator%(const Bignum& x, const Bignum& y) { - Bignum z; - bn_assert(BN_mod(z.bn_ptr(), x.bn_ptr(), y.bn_ptr(), get_ctx())); - return z; -} - -unsigned long operator%(const Bignum& x, unsigned long y) { - BN_ULONG rem = BN_mod_word(x.bn_ptr(), y); - bn_assert(rem != (BN_ULONG)(-1)); - return rem; -} - -const Bignum operator<<(const Bignum& x, int r) { - Bignum z; - bn_assert(BN_lshift(z.bn_ptr(), x.bn_ptr(), r)); - return z; -} - -const Bignum operator>>(const Bignum& x, int r) { - Bignum z; - bn_assert(BN_rshift(z.bn_ptr(), x.bn_ptr(), r)); - return z; -} - -const Bignum abs(const Bignum& x) { - Bignum T(x); - if (T.sign() < 0) { - T.negate(); - } - return T; -} - -const Bignum sqr(const Bignum& x) { - Bignum z; - bn_assert(BN_sqr(z.bn_ptr(), x.bn_ptr(), get_ctx())); - return z; -} - -void 
Bignum::export_msb(unsigned char* buffer, std::size_t size) const { - bn_assert(size >= 0 && size <= (1 << 20)); - bn_assert(sign() >= 0); - int n = BN_num_bytes(&val); - bn_assert(n >= 0 && (unsigned)n <= size); - bn_assert(BN_bn2bin(&val, buffer + size - n) == n); - std::memset(buffer, 0, size - n); -} - -Bignum& Bignum::import_msb(const unsigned char* buffer, std::size_t size) { - bn_assert(size >= 0 && size <= (1 << 20)); - std::size_t i = 0; - while (i < size && !buffer[i]) { - i++; - } - bn_assert(BN_bin2bn(buffer + i, size - i, &val) == &val); - return *this; -} - -void Bignum::export_lsb(unsigned char* buffer, std::size_t size) const { - bn_assert(size >= 0 && size <= (1 << 20)); - bn_assert(sign() >= 0); - std::size_t n = BN_num_bytes(&val); - bn_assert(n >= 0 && (unsigned)n <= size); - bn_assert(BN_bn2bin(&val, buffer) == (int)n); - std::memset(buffer + n, 0, size - n); - for (std::size_t i = 0; 2 * i + 1 < n; i++) { - std::swap(buffer[i], buffer[n - 1 - i]); - } -} - -Bignum& Bignum::import_lsb(const unsigned char* buffer, std::size_t size) { - bn_assert(size >= 0 && size <= (1 << 20)); - while (size > 0 && !buffer[size - 1]) { - size--; - } - if (!size) { - bn_assert(BN_zero(&val)); - return *this; - } - unsigned char tmp[size], *ptr = tmp + size; - for (std::size_t i = 0; i < size; i++) { - *--ptr = buffer[i]; - } - bn_assert(BN_bin2bn(tmp, size, &val) == &val); - return *this; -} - -int cmp(const Bignum& x, const Bignum& y) { - return BN_cmp(x.bn_ptr(), y.bn_ptr()); -} - -bool operator==(const Bignum& x, const Bignum& y) { - return cmp(x, y) == 0; -} - -bool operator!=(const Bignum& x, const Bignum& y) { - return cmp(x, y) != 0; -} - -bool operator<(const Bignum& x, const Bignum& y) { - return cmp(x, y) < 0; -} - -bool operator<=(const Bignum& x, const Bignum& y) { - return cmp(x, y) <= 0; -} - -bool operator>(const Bignum& x, const Bignum& y) { - return cmp(x, y) > 0; -} - -bool operator>=(const Bignum& x, const Bignum& y) { - return cmp(x, y) >= 0; -} - -bool operator==(const Bignum& x, long y) { - if (y >= 0) { - return BN_is_word(x.bn_ptr(), y); - } else { - return x == Bignum(y); - } -} - -bool operator!=(const Bignum& x, long y) { - if (y >= 0) { - return !BN_is_word(x.bn_ptr(), y); - } else { - return x != Bignum(y); - } -} - -std::string Bignum::to_str() const { - char* ptr = BN_bn2dec(&val); - std::string z(ptr); - OPENSSL_free(ptr); - return z; -} - -std::string Bignum::to_hex() const { - char* ptr = BN_bn2hex(&val); - std::string z(ptr); - OPENSSL_free(ptr); - return z; -} - -std::ostream& operator<<(std::ostream& os, const Bignum& x) { - return os << x.to_str(); -} - -std::istream& operator>>(std::istream& is, Bignum& x) { - std::string word; - is >> word; - x = dec_string(word); - return is; -} - -bool is_prime(const Bignum& p, int nchecks = 64, bool trial_div = true) { - return BN_is_prime_fasttest_ex(p.bn_ptr(), BN_prime_checks, get_ctx(), trial_div, 0); -} -} // namespace arith - -namespace arith { -using namespace openssl; - -class Residue; -class ResidueRing; - -class ResidueRing { - public: - struct bad_modulus {}; - struct elem_cnt_mismatch { - int cnt; - elem_cnt_mismatch(int x) : cnt(x) { - } - }; - - private: - const Bignum modulus; - mutable int cnt; - bool prime; - void cnt_assert(bool b) { - if (!b) { - throw elem_cnt_mismatch(cnt); - } - } - Residue* Zero; - Residue* One; - Residue* Img_i; - void init(); - - public: - typedef Residue element; - explicit ResidueRing(Bignum mod) : modulus(mod), cnt(0), prime(arith::is_prime(mod)), Zero(0), One(0) { - 
init(); - } - ~ResidueRing(); - int incr_count() { - return ++cnt; - } - int decr_count() { - --cnt; - cnt_assert(cnt >= 0); - return cnt; - } - const Bignum& get_modulus() const { - return modulus; - } - bool is_prime() const { - return prime; - } - const Residue& zero() const { - return *Zero; - } - const Residue& one() const { - return *One; - } - const Residue& img_i(); - Residue frac(long num, long denom = 1); - Residue convert(long num); - Residue convert(const Bignum& x); - - Bignum reduce(const Bignum& x) { - Bignum r = x % modulus; - if (r.sign() < 0) { - r += modulus; - } - return r; - } - - Bignum& do_reduce(Bignum& x) { - x %= modulus; - if (x.sign() < 0) { - x += modulus; - } - return x; - } -}; - -class Residue { - public: - struct not_same_ring {}; - - private: - ResidueRing* ring; - mutable Bignum val; - Residue& reduce() { - ring->do_reduce(val); - return *this; - } - - public: - explicit Residue(ResidueRing& R) : ring(&R) { - R.incr_count(); - } - Residue(const Bignum& x, ResidueRing& R) : ring(&R), val(R.reduce(x)) { - R.incr_count(); - } - ~Residue() { - ring->decr_count(); - ring = 0; - } - Residue(const Residue& x) : ring(x.ring), val(x.val) { - ring->incr_count(); - } - Bignum extract() const { - return val; - } - const Bignum& extract_raw() const { - return val; - } - const Bignum& modulus() const { - return ring->get_modulus(); - } - void same_ring(const Residue& y) const { - if (ring != y.ring) { - throw not_same_ring(); - } - } - ResidueRing& ring_of() const { - return *ring; - } - bool is_zero() const { - return (val == 0); - } - Residue& operator=(const Residue& x) { - same_ring(x); - val = x.val; - return *this; - } - Residue& operator=(const Bignum& x) { - val = ring->reduce(x); - return *this; - } - Residue& operator+=(const Residue& y); - Residue& operator-=(const Residue& y); - Residue& operator*=(const Residue& y); - Residue& operator+=(long y) { - val += y; - return reduce(); - } - Residue& operator-=(long y) { - val -= y; - return reduce(); - } - Residue& operator*=(long y) { - val *= y; - return reduce(); - } - Residue& negate() { - val.negate(); - return reduce(); - } - friend const Residue operator+(const Residue& x, const Residue& y); - friend const Residue operator-(const Residue& x, const Residue& y); - friend const Residue operator*(const Residue& x, const Residue& y); - friend const Residue operator-(const Residue& x); - friend Residue sqr(const Residue& x); - friend Residue power(const Residue& x, const Bignum& y); - friend Residue inverse(const Residue& x); - std::string to_str() const; -}; - -void ResidueRing::init() { - Zero = new Residue(0, *this); - One = new Residue(1, *this); -} - -ResidueRing::~ResidueRing() { - delete Zero; - delete One; - Zero = One = 0; - cnt_assert(!cnt); -} - -const Residue operator+(const Residue& x, const Residue& y) { - x.same_ring(y); - Residue z(x.ring_of()); - bn_assert(BN_mod_add(z.val.bn_ptr(), x.val.bn_ptr(), y.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -const Residue operator-(const Residue& x, const Residue& y) { - x.same_ring(y); - Residue z(x.ring_of()); - bn_assert(BN_mod_sub(z.val.bn_ptr(), x.val.bn_ptr(), y.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -const Residue operator*(const Residue& x, const Residue& y) { - x.same_ring(y); - Residue z(x.ring_of()); - bn_assert(BN_mod_mul(z.val.bn_ptr(), x.val.bn_ptr(), y.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -const Residue operator-(const Residue& x) { - Residue z(x); - z.val.negate(); - 
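The Bignum class above is a thin wrapper over OpenSSL's BIGNUM: set_dec_str/set_hex_str and to_str/to_hex forward to BN_dec2bn/BN_hex2bn and BN_bn2dec/BN_bn2hex, and the arithmetic operators forward to BN_add, BN_mul, BN_div and friends. A minimal usage sketch, assuming this translation unit's arith namespace (the function name bignum_usage_sketch is made up for illustration):

#include <iostream>

void bignum_usage_sketch() {
  arith::Bignum a;
  a.set_dec_str("123456789012345678901234567890");    // parsed via BN_dec2bn
  arith::Bignum b = (a * 2 + 1) % arith::Bignum(97);  // BN_mul_word, BN_add_word, BN_mod under the hood
  std::cout << a.to_hex() << "; (2a+1) mod 97 = " << b << std::endl;  // BN_bn2hex / BN_bn2dec
}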
return z.reduce(); -} - -Residue& Residue::operator+=(const Residue& y) { - same_ring(y); - bn_assert(BN_mod_add(val.bn_ptr(), val.bn_ptr(), y.val.bn_ptr(), modulus().bn_ptr(), get_ctx())); - return *this; -} - -Residue& Residue::operator-=(const Residue& y) { - same_ring(y); - bn_assert(BN_mod_sub(val.bn_ptr(), val.bn_ptr(), y.val.bn_ptr(), modulus().bn_ptr(), get_ctx())); - return *this; -} - -Residue& Residue::operator*=(const Residue& y) { - same_ring(y); - bn_assert(BN_mod_mul(val.bn_ptr(), val.bn_ptr(), y.val.bn_ptr(), modulus().bn_ptr(), get_ctx())); - return *this; -} - -bool operator==(const Residue& x, const Residue& y) { - x.same_ring(y); - return x.extract() == y.extract(); -} - -bool operator!=(const Residue& x, const Residue& y) { - x.same_ring(y); - return x.extract() != y.extract(); -} - -Residue sqr(const Residue& x) { - Residue z(x.ring_of()); - bn_assert(BN_mod_sqr(z.val.bn_ptr(), x.val.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -Residue power(const Residue& x, const Bignum& y) { - Residue z(x.ring_of()); - bn_assert(BN_mod_exp(z.val.bn_ptr(), x.val.bn_ptr(), y.bn_ptr(), x.modulus().bn_ptr(), get_ctx())); - return z; -} - -Residue inverse(const Residue& x) { - assert(x.ring_of().is_prime()); - return power(x, x.ring_of().get_modulus() - 2); -} - -const Residue& ResidueRing::img_i() { - if (!Img_i) { - assert(is_prime()); - assert(modulus % 4 == 1); - int g = 2; - Bignum n = (modulus - 1) / 4; - while (true) { - Residue t = power(frac(g), n); - if (t != one() && t != frac(-1)) { - Img_i = new Residue(t); - break; - } - } - } - return *Img_i; -} - -Residue sqrt(const Residue& x) { - assert(x.ring_of().is_prime()); - ResidueRing& R = x.ring_of(); - const Bignum& p = R.get_modulus(); - if (x.is_zero() || !p.odd()) { - return x; - } - if (p[1]) { // p=3 (mod 4) - return power(x, (p + 1) >> 2); - } else if (p[2]) { - // p=5 (mod 8) - Residue t = power(x, (p + 3) >> 3); - return (sqr(t) == x) ? 
t : R.img_i() * t; - } else { - assert(p[2]); - return R.zero(); - } -} - -Residue ResidueRing::frac(long num, long denom) { - assert(denom); - if (denom < 0) { - num = -num; - denom = -denom; - } - if (!(num % denom)) { - return Residue(num / denom, *this); - } else { - return Residue(num, *this) * inverse(Residue(denom, *this)); - } -} - -inline Residue ResidueRing::convert(long x) { - return Residue(x, *this); -} - -inline Residue ResidueRing::convert(const Bignum& x) { - return Residue(x, *this); -} - -std::string Residue::to_str() const { - return "Mod(" + val.to_str() + "," + modulus().to_str() + ")"; -} - -std::ostream& operator<<(std::ostream& os, const Residue& x) { - return os << x.to_str(); -} - -std::istream& operator>>(std::istream& is, Residue& x) { - std::string word; - is >> word; - x = dec_string(word); - return is; -} -} // namespace arith - -// ****************************************************** - -namespace ellcurve { -using namespace arith; - -const Bignum& P25519() { - static Bignum P25519 = (Bignum(1) << 255) - 19; - return P25519; -} - -ResidueRing& Fp25519() { - static ResidueRing Fp25519(P25519()); - return Fp25519; -} -} // namespace ellcurve - -// ****************************************************** - -namespace ellcurve { -using namespace arith; - -class MontgomeryCurve { - ResidueRing& ring; - int A_short; // v^2 = u^2 + Au + 1 - int Gu_short; // u(G) - int a_short; // (A+2)/4 - Residue A; - Residue Gu; - Bignum P; - Bignum L; - Bignum Order; - Bignum cofactor; - int cofactor_short; - - void init(); - - public: - MontgomeryCurve(int _A, int _Gu, ResidueRing& _R) - : ring(_R) - , A_short(_A) - , Gu_short(_Gu) - , a_short((_A + 2) / 4) - , A(_A, _R) - , Gu(_Gu, _R) - , P(_R.get_modulus()) - , cofactor_short(0) { - init(); - } - - const Residue& get_gen_u() const { - return Gu; - } - const Bignum& get_ell() const { - return L; - } - const Bignum& get_order() const { - return Order; - } - ResidueRing& get_base_ring() const { - return ring; - } - const Bignum& get_p() const { - return P; - } - - void set_order_cofactor(const Bignum& order, int cof); - - struct PointXZ { - Residue X, Z; - PointXZ(Residue x, Residue z) : X(x), Z(z) { - x.same_ring(z); - } - PointXZ(ResidueRing& r) : X(r.one()), Z(r.zero()) { - } - explicit PointXZ(Residue u) : X(u), Z(u.ring_of().one()) { - } - explicit PointXZ(Residue y, bool) : X(y.ring_of().one() - y), Z(y + y.ring_of().one()) { - } - PointXZ(const PointXZ& P) : X(P.X), Z(P.Z) { - } - PointXZ& operator=(const PointXZ& P) { - X = P.X; - Z = P.Z; - return *this; - } - Residue get_u() const { - return X * inverse(Z); - } - Residue get_v(bool sign_v = false) const; - bool is_infty() const { - return Z.is_zero(); - } - Residue get_y() const { - return (X - Z) * inverse(X + Z); - } - bool export_point_y(unsigned char buffer[32]) const; - bool export_point_u(unsigned char buffer[32]) const; - void zeroize() { - X = Z = Z.ring_of().zero(); - } - }; - - PointXZ power_gen_xz(const Bignum& n) const; - PointXZ power_xz(const Residue& u, const Bignum& n) const; - PointXZ power_xz(const PointXZ& P, const Bignum& n) const; - PointXZ add_xz(const PointXZ& P, const PointXZ& Q) const; - PointXZ double_xz(const PointXZ& P) const; - - PointXZ import_point_u(const unsigned char point[32]) const; - PointXZ import_point_y(const unsigned char point[32]) const; -}; - -void MontgomeryCurve::init() { - assert(!((a_short + 2) & 3) && a_short >= 0); -} - -void MontgomeryCurve::set_order_cofactor(const Bignum& order, int cof) { - assert(order > 0); - 
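As an aside on the Residue arithmetic above: inverse() relies on Fermat's little theorem, x^(p-2) = x^(-1) (mod p) for prime p and x not divisible by p, which is why it asserts that the ring's modulus is prime. The same computation can be done directly against OpenSSL; a self-contained sketch with an illustrative small prime (for non-prime moduli OpenSSL provides BN_mod_inverse instead):

#include <openssl/bn.h>
#include <cassert>

int main() {
  BN_CTX* ctx = BN_CTX_new();
  BIGNUM *p = BN_new(), *x = BN_new(), *e = BN_new(), *inv = BN_new(), *chk = BN_new();
  BN_dec2bn(&p, "101");           // small prime modulus, for illustration only
  BN_set_word(x, 7);
  BN_copy(e, p);
  BN_sub_word(e, 2);              // e = p - 2
  BN_mod_exp(inv, x, e, p, ctx);  // inv = x^(p-2) mod p, as in inverse(const Residue&) above
  BN_mod_mul(chk, inv, x, p, ctx);
  assert(BN_is_one(chk));         // x * x^(p-2) == 1 (mod p)
  BN_free(chk); BN_free(inv); BN_free(e); BN_free(x); BN_free(p);
  BN_CTX_free(ctx);
  return 0;
}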
assert(cof >= 0); - assert(cof == 0 || (order % cof) == 0); - Order = order; - cofactor = cofactor_short = cof; - if (cof > 0) { - L = order / cof; - assert(is_prime(L)); - } - assert(!power_gen_xz(1).is_infty()); - assert(power_gen_xz(Order).is_infty()); -} - -// computes u(P+Q)*u(P-Q) as X/Z -MontgomeryCurve::PointXZ MontgomeryCurve::add_xz(const MontgomeryCurve::PointXZ& P, - const MontgomeryCurve::PointXZ& Q) const { - Residue u = (P.X + P.Z) * (Q.X - Q.Z); - Residue v = (P.X - P.Z) * (Q.X + Q.Z); - return MontgomeryCurve::PointXZ(sqr(u + v), sqr(u - v)); -} - -// computes u(2P) as X/Z -MontgomeryCurve::PointXZ MontgomeryCurve::double_xz(const MontgomeryCurve::PointXZ& P) const { - Residue u = sqr(P.X + P.Z); - Residue v = sqr(P.X - P.Z); - Residue w = u - v; - return PointXZ(u * v, w * (v + Residue(a_short, ring) * w)); -} - -MontgomeryCurve::PointXZ MontgomeryCurve::power_gen_xz(const Bignum& n) const { - return power_xz(Gu, n); -} - -MontgomeryCurve::PointXZ MontgomeryCurve::power_xz(const Residue& u, const Bignum& n) const { - return power_xz(PointXZ(u), n); -} - -// computes u([n]P) in form X/Z -MontgomeryCurve::PointXZ MontgomeryCurve::power_xz(const PointXZ& A, const Bignum& n) const { - assert(n >= 0); - if (n == 0) { - return PointXZ(ring); - } - - int k = n.num_bits(); - PointXZ P(A); - PointXZ Q(double_xz(P)); - for (int i = k - 2; i >= 0; --i) { - PointXZ PQ(add_xz(P, Q)); - PQ.X *= A.Z; - PQ.Z *= A.X; - if (n[i]) { - P = PQ; - Q = double_xz(Q); - } else { - Q = PQ; - P = double_xz(P); - } - } - return P; -} - -bool MontgomeryCurve::PointXZ::export_point_y(unsigned char buffer[32]) const { - if ((X + Z).is_zero()) { - std::memset(buffer, 0xff, 32); - return false; - } else { - get_y().extract().export_lsb(buffer, 32); - return true; - } -} - -bool MontgomeryCurve::PointXZ::export_point_u(unsigned char buffer[32]) const { - if (Z.is_zero()) { - std::memset(buffer, 0xff, 32); - return false; - } else { - get_u().extract().export_lsb(buffer, 32); - return true; - } -} - -MontgomeryCurve::PointXZ MontgomeryCurve::import_point_u(const unsigned char point[32]) const { - Bignum u; - u.import_lsb(point, 32); - u[255] = 0; - return PointXZ(Residue(u, ring)); -} - -MontgomeryCurve::PointXZ MontgomeryCurve::import_point_y(const unsigned char point[32]) const { - Bignum y; - y.import_lsb(point, 32); - y[255] = 0; - return PointXZ(Residue(y, ring), true); -} - -MontgomeryCurve& Curve25519() { - static MontgomeryCurve Curve25519(486662, 9, Fp25519()); - static bool init = false; - if (!init) { - Curve25519.set_order_cofactor(hex_string{"80000000000000000000000000000000a6f7cef517bce6b2c09318d2e7ae9f68"}, 8); - init = true; - } - return Curve25519; -} -} // namespace ellcurve - -// ****************************************************** - -namespace ellcurve { -using namespace arith; - -class TwEdwardsCurve; - -class TwEdwardsCurve { - public: - struct SegrePoint { - Residue XY, X, Y, Z; // if x=X/Z and y=Y/T, stores (xy,x,y,1)*Z*T - SegrePoint(ResidueRing& R) : XY(R), X(R), Y(R), Z(R) { - } - SegrePoint(const Residue& x, const Residue& y) : XY(x * y), X(x), Y(y), Z(y.ring_of().one()) { - } - SegrePoint(const TwEdwardsCurve& E, const Residue& y, bool x_sign); - SegrePoint(const SegrePoint& P) : XY(P.XY), X(P.X), Y(P.Y), Z(P.Z) { - } - SegrePoint& operator=(const SegrePoint& P) { - XY = P.XY; - X = P.X; - Y = P.Y; - Z = P.Z; - return *this; - } - bool is_zero() const { - return X.is_zero() && (Y == Z); - } - bool is_valid() const { - return (XY * Z == X * Y) && !(XY.is_zero() && X.is_zero() 
&& Y.is_zero() && Z.is_zero()); - } - bool is_finite() const { - return !Z.is_zero(); - } - bool is_normalized() const { - return Z == Z.ring_of().one(); - } - SegrePoint& normalize() { - auto f = inverse(Z); - XY *= f; - X *= f; - Y *= f; - Z = Z.ring_of().one(); - return *this; - } - SegrePoint& zeroize() { - XY = X = Y = Z = Z.ring_of().zero(); - return *this; - } - bool export_point(unsigned char buffer[32], bool need_x = true) const; - bool export_point_y(unsigned char buffer[32]) const { - return export_point(buffer, false); - } - bool export_point_u(unsigned char buffer[32]) const; - Residue get_y() const { - return Y * inverse(Z); - } - Residue get_x() const { - return X * inverse(Z); - } - Residue get_u() const { - return (Z + Y) * inverse(Z - Y); - } - void negate() { - XY.negate(); - X.negate(); - } - }; - - private: - ResidueRing& ring; - Residue D; - Residue D2; - Residue Gy; - Bignum P; - Bignum L; - Bignum Order; - Bignum cofactor; - int cofactor_short; - SegrePoint G; - SegrePoint O; - void init(); - - public: - TwEdwardsCurve(const Residue& _D, const Residue& _Gy, ResidueRing& _R) - : ring(_R), D(_D), D2(_D + _D), Gy(_Gy), P(_R.get_modulus()), cofactor_short(0), G(_R), O(_R) { - init(); - } - - const Residue& get_gen_y() const { - return Gy; - } - const Bignum& get_ell() const { - return L; - } - const Bignum& get_order() const { - return Order; - } - ResidueRing& get_base_ring() const { - return ring; - } - const Bignum& get_p() const { - return P; - } - const SegrePoint& get_base_point() const { - return G; - } - - void set_order_cofactor(const Bignum& order, int cof); - bool recover_x(Residue& x, const Residue& y, bool x_sign) const; - - void add_points(SegrePoint& R, const SegrePoint& P, const SegrePoint& Q) const; - SegrePoint add_points(const SegrePoint& P, const SegrePoint& Q) const; - void double_point(SegrePoint& R, const SegrePoint& P) const; - SegrePoint double_point(const SegrePoint& P) const; - SegrePoint power_point(const SegrePoint& A, const Bignum& n) const; - SegrePoint power_gen(const Bignum& n) const; - - SegrePoint import_point(const unsigned char point[32], bool& ok) const; -}; - -std::ostream& operator<<(std::ostream& os, const TwEdwardsCurve::SegrePoint& P) { - return os << "[" << P.XY << ":" << P.X << ":" << P.Y << ":" << P.Z << "]"; -} - -void TwEdwardsCurve::init() { - assert(D != ring.zero() && D != ring.convert(-1)); - O.X = O.Z = ring.one(); - G = SegrePoint(*this, Gy, 0); - assert(!G.XY.is_zero()); -} - -void TwEdwardsCurve::set_order_cofactor(const Bignum& order, int cof) { - assert(order > 0); - assert(cof >= 0); - assert(cof == 0 || (order % cof) == 0); - Order = order; - cofactor = cofactor_short = cof; - if (cof > 0) { - L = order / cof; - assert(is_prime(L)); - assert(!power_gen(1).is_zero()); - assert(power_gen(L).is_zero()); - } -} - -TwEdwardsCurve::SegrePoint::SegrePoint(const TwEdwardsCurve& E, const Residue& y, bool x_sign) - : XY(y), X(E.get_base_ring()), Y(y), Z(E.get_base_ring().one()) { - Residue x(y.ring_of()); - if (E.recover_x(x, y, x_sign)) { - XY *= x; - X = x; - } else { - XY = Y = Z = E.get_base_ring().zero(); - } -} - -bool TwEdwardsCurve::recover_x(Residue& x, const Residue& y, bool x_sign) const { - // recovers x from equation -x^2+y^2 = 1+d*x^2*y^2 - Residue z = inverse(ring.one() + D * sqr(y)); - if (z.is_zero()) { - return false; - } - z *= sqr(y) - ring.one(); - Residue t = sqrt(z); - if (sqr(t) == z) { - x = (t.extract().odd() == x_sign) ? 
t : -t; - //std::cout << "x=" << x << ", y=" << y << std::endl; - return true; - } else { - return false; - } -} - -void TwEdwardsCurve::add_points(SegrePoint& Res, const SegrePoint& P, const SegrePoint& Q) const { - Residue a((P.X + P.Y) * (Q.X + Q.Y)); - Residue b((P.X - P.Y) * (Q.X - Q.Y)); - Residue c(P.Z * Q.Z * ring.convert(2)); - Residue d(P.XY * Q.XY * D2); - Residue x_num(a - b); // 2(x1y2+x2y1) - Residue y_num(a + b); // 2(x1x2+y1y2) - Residue x_den(c + d); // 2(1+dx1x2y1y2) - Residue y_den(c - d); // 2(1-dx1x2y1y2) - Res.X = x_num * y_den; // x = x_num/x_den, y = y_num/y_den - Res.Y = y_num * x_den; - Res.XY = x_num * y_num; - Res.Z = x_den * y_den; -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::add_points(const SegrePoint& P, const SegrePoint& Q) const { - SegrePoint Res(ring); - add_points(Res, P, Q); - return Res; -} - -void TwEdwardsCurve::double_point(SegrePoint& Res, const SegrePoint& P) const { - add_points(Res, P, P); -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::double_point(const SegrePoint& P) const { - SegrePoint Res(ring); - double_point(Res, P); - return Res; -} - -// computes u([n]P) in form (xy,x,y,1)*Z -TwEdwardsCurve::SegrePoint TwEdwardsCurve::power_point(const SegrePoint& A, const Bignum& n) const { - assert(n >= 0); - if (n == 0) { - return O; - } - - int k = n.num_bits(); - SegrePoint P(A); - SegrePoint Q(double_point(A)); - for (int i = k - 2; i >= 0; --i) { - if (n[i]) { - add_points(P, P, Q); - double_point(Q, Q); - } else { - // we do more operations than necessary for uniformicity - add_points(Q, P, Q); - double_point(P, P); - } - } - return P; -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::power_gen(const Bignum& n) const { - return power_point(G, n); -} - -bool TwEdwardsCurve::SegrePoint::export_point(unsigned char buffer[32], bool need_x) const { - if (!is_normalized()) { - if (Z.is_zero()) { - std::memset(buffer, 0xff, 32); - return false; - } - Residue f(inverse(Z)); - Bignum y((Y * f).extract()); - assert(!y[255]); - if (need_x) { - y[255] = (X * f).extract().odd(); - } - y.export_lsb(buffer, 32); - } else { - Bignum y(Y.extract()); - assert(!y[255]); - if (need_x) { - y[255] = X.extract().odd(); - } - y.export_lsb(buffer, 32); - } - return true; -} - -bool TwEdwardsCurve::SegrePoint::export_point_u(unsigned char buffer[32]) const { - if (Z == Y) { - std::memset(buffer, 0xff, 32); - return false; - } - Residue f(inverse(Z - Y)); - ((Z + Y) * f).extract().export_lsb(buffer, 32); - assert(!(buffer[31] & 0x80)); - return true; -} - -TwEdwardsCurve::SegrePoint TwEdwardsCurve::import_point(const unsigned char point[32], bool& ok) const { - Bignum y; - y.import_lsb(point, 32); - bool x_sign = y[255]; - y[255] = 0; - Residue yr(y, ring); - Residue xr(ring); - ok = recover_x(xr, yr, x_sign); - return ok ? 
SegrePoint(xr, yr) : SegrePoint(ring); -} - -TwEdwardsCurve& Ed25519() { - static TwEdwardsCurve Ed25519(Fp25519().frac(-121665, 121666), Fp25519().frac(4, 5), Fp25519()); - static bool init = false; - if (!init) { - Ed25519.set_order_cofactor(hex_string{"80000000000000000000000000000000a6f7cef517bce6b2c09318d2e7ae9f68"}, 8); - init = true; - } - return Ed25519; -} -} // namespace ellcurve - -// ****************************************************** - -namespace openssl { -#include -} - -namespace digest { -using namespace openssl; - -struct OpensslEVP_SHA1 { - enum { digest_bytes = 20 }; - static const EVP_MD* get_evp() { - return EVP_sha1(); - } -}; - -struct OpensslEVP_SHA256 { - enum { digest_bytes = 32 }; - static const EVP_MD* get_evp() { - return EVP_sha256(); - } -}; - -struct OpensslEVP_SHA512 { - enum { digest_bytes = 64 }; - static const EVP_MD* get_evp() { - return EVP_sha512(); - } -}; - -template -class HashCtx { - EVP_MD_CTX ctx; - void init(); - void clear(); - - public: - enum { digest_bytes = H::digest_bytes }; - HashCtx() { - init(); - } - HashCtx(const void* data, std::size_t len) { - init(); - feed(data, len); - } - ~HashCtx() { - clear(); - } - void feed(const void* data, std::size_t len); - std::size_t extract(unsigned char buffer[digest_bytes]); - std::string extract(); -}; - -template -void HashCtx::init() { - EVP_MD_CTX_init(&ctx); - EVP_DigestInit_ex(&ctx, H::get_evp(), 0); -} - -template -void HashCtx::clear() { - EVP_MD_CTX_cleanup(&ctx); -} - -template -void HashCtx::feed(const void* data, std::size_t len) { - EVP_DigestUpdate(&ctx, data, len); -} - -template -std::size_t HashCtx::extract(unsigned char buffer[digest_bytes]) { - unsigned olen = 0; - EVP_DigestFinal_ex(&ctx, buffer, &olen); - assert(olen == digest_bytes); - return olen; -} - -template -std::string HashCtx::extract() { - unsigned char buffer[digest_bytes]; - unsigned olen = 0; - EVP_DigestFinal_ex(&ctx, buffer, &olen); - assert(olen == digest_bytes); - return std::string((char*)buffer, olen); -} - -typedef HashCtx SHA1; -typedef HashCtx SHA256; -typedef HashCtx SHA512; - -template -std::size_t hash_str(unsigned char buffer[T::digest_bytes], const void* data, std::size_t size) { - T hasher(data, size); - return hasher.extract(buffer); -} - -template -std::size_t hash_two_str(unsigned char buffer[T::digest_bytes], const void* data1, std::size_t size1, const void* data2, - std::size_t size2) { - T hasher(data1, size1); - hasher.feed(data2, size2); - return hasher.extract(buffer); -} - -template -std::string hash_str(const void* data, std::size_t size) { - T hasher(data, size); - return hasher.extract(); -} - -template -std::string hash_two_str(const void* data1, std::size_t size1, const void* data2, std::size_t size2) { - T hasher(data1, size1); - hasher.feed(data2, size2); - return hasher.extract(); -} -} // namespace digest - -// ****************************************************** - -namespace openssl { -#include -} - -#include -#include - -namespace prng { - -int os_get_random_bytes(void* buf, int n) { - using namespace std; - int r = 0, h = open("/dev/random", O_RDONLY | O_NONBLOCK); - if (h >= 0) { - r = read(h, buf, n); - if (r > 0) { - //std::cerr << "added " << r << " bytes of real entropy to secure random numbers seed" << std::endl; - } else { - r = 0; - } - close(h); - } - - if (r < n) { - h = open("/dev/urandom", O_RDONLY); - if (h < 0) { - return r; - } - int s = read(h, (char*)buf + r, n - r); - close(h); - if (s < 0) { - return r; - } - r += s; - } - - if (r >= 8) { - *(long*)buf ^= 
lrand48(); - srand48(*(long*)buf); - } - - return r; -} -} // namespace prng - -namespace prng { -using namespace openssl; - -class RandomGen { - public: - struct rand_error {}; - void randomize(bool force = true); - void seed_add(const void* data, std::size_t size, double entropy = 0); - bool ok() const { - return RAND_status(); - } - RandomGen() { - randomize(false); - } - RandomGen(const void* seed, std::size_t size) { - seed_add(seed, size); - randomize(false); - } - bool rand_bytes(void* data, std::size_t size, bool strong = false); - bool strong_rand_bytes(void* data, std::size_t size) { - return rand_bytes(data, size, true); - } - template - bool rand_obj(T& obj) { - return rand_bytes(&obj, sizeof(T)); - } - template - bool rand_objs(T* ptr, std::size_t count) { - return rand_bytes(ptr, sizeof(T) * count); - } - std::string rand_string(std::size_t size, bool strong = false); -}; - -void RandomGen::seed_add(const void* data, std::size_t size, double entropy) { - RAND_add(data, size, entropy > 0 ? entropy : size); -} - -void RandomGen::randomize(bool force) { - if (!force && ok()) { - return; - } - unsigned char buffer[128]; - int n = os_get_random_bytes(buffer, 128); - seed_add(buffer, n); - assert(ok()); -} - -bool RandomGen::rand_bytes(void* data, std::size_t size, bool strong) { - int res = (strong ? RAND_bytes : RAND_pseudo_bytes)((unsigned char*)data, size); - if (res != 0 && res != 1) { - throw rand_error(); - } - return res; -} - -std::string RandomGen::rand_string(std::size_t size, bool strong) { - char buffer[size]; - if (!rand_bytes(buffer, size, strong)) { - throw rand_error(); - } - return std::string(buffer, size); -} - -RandomGen& rand_gen() { - static RandomGen MainPRNG; - return MainPRNG; -} - -} // namespace prng - -// ****************************************************** - -namespace crypto { -namespace Ed25519 { - -const int privkey_bytes = 32; -const int pubkey_bytes = 32; -const int sign_bytes = 64; -const int shared_secret_bytes = 32; - -bool all_bytes_same(const unsigned char* str, std::size_t size) { - unsigned char c = str[0]; - for (std::size_t i = 0; i < size; i++) { - if (str[i] != c) { - return false; - } - } - return true; -} - -class PublicKey { - enum { pk_empty, pk_xz, pk_init } inited; - unsigned char pubkey[pubkey_bytes]; - ellcurve::TwEdwardsCurve::SegrePoint PubKey; - ellcurve::MontgomeryCurve::PointXZ PubKey_xz; - - public: - PublicKey() : inited(pk_empty), PubKey(ellcurve::Fp25519()), PubKey_xz(ellcurve::Fp25519()) { - } - PublicKey(const unsigned char pub_key[pubkey_bytes]); - PublicKey(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key); - - bool import_public_key(const unsigned char pub_key[pubkey_bytes]); - bool import_public_key(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key); - bool export_public_key(unsigned char pubkey_buffer[pubkey_bytes]) const; - bool check_message_signature(unsigned char signature[sign_bytes], const unsigned char* message, std::size_t msg_size); - - void clear(); - bool ok() const { - return inited == pk_init; - } - - const unsigned char* get_pubkey_ptr() const { - return inited == pk_init ? 
pubkey : 0; - } - const ellcurve::TwEdwardsCurve::SegrePoint& get_point() const { - return PubKey; - } - const ellcurve::MontgomeryCurve::PointXZ& get_point_xz() const { - return PubKey_xz; - } -}; - -void PublicKey::clear(void) { - if (inited != pk_empty) { - std::memset(pubkey, 0, pubkey_bytes); - PubKey.zeroize(); - PubKey_xz.zeroize(); - } - inited = pk_empty; -} - -PublicKey::PublicKey(const unsigned char pub_key[pubkey_bytes]) - : inited(pk_empty), PubKey(ellcurve::Fp25519()), PubKey_xz(ellcurve::Fp25519()) { - import_public_key(pub_key); -} - -PublicKey::PublicKey(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key) - : inited(pk_empty), PubKey(ellcurve::Fp25519()), PubKey_xz(ellcurve::Fp25519()) { - import_public_key(Pub_Key); -} - -bool PublicKey::import_public_key(const unsigned char pub_key[pubkey_bytes]) { - clear(); - if (all_bytes_same(pub_key, pubkey_bytes)) { - return false; - } - bool ok = false; - PubKey = ellcurve::Ed25519().import_point(pub_key, ok); - if (!ok) { - clear(); - return false; - } - std::memcpy(pubkey, pub_key, pubkey_bytes); - PubKey_xz.X = PubKey.Z + PubKey.Y; - PubKey_xz.Z = PubKey.Z - PubKey.Y; - inited = pk_init; - return true; -} - -bool PublicKey::import_public_key(const ellcurve::TwEdwardsCurve::SegrePoint& Pub_Key) { - clear(); - if (!Pub_Key.is_valid()) { - return false; - } - PubKey = Pub_Key; - PubKey_xz.X = PubKey.Z + PubKey.Y; - PubKey_xz.Z = PubKey.Z - PubKey.Y; - inited = pk_init; - - if (!PubKey.export_point(pubkey)) { - clear(); - return false; - } - return true; -} - -bool PublicKey::export_public_key(unsigned char pubkey_buffer[pubkey_bytes]) const { - if (inited != pk_init) { - std::memset(pubkey_buffer, 0, pubkey_bytes); - return false; - } else { - std::memcpy(pubkey_buffer, pubkey, pubkey_bytes); - return true; - } -} - -bool PublicKey::check_message_signature(unsigned char signature[sign_bytes], const unsigned char* message, - std::size_t msg_size) { - if (inited != pk_init) { - return false; - } - unsigned char hash[64]; - { - digest::SHA512 hasher(signature, 32); - hasher.feed(pubkey, 32); - hasher.feed(message, msg_size); - hasher.extract(hash); - } - auto& E = ellcurve::Ed25519(); - const arith::Bignum& L = E.get_ell(); - arith::Bignum H, S; - S.import_lsb(signature + 32, 32); - H.import_lsb(hash, 64); - H %= L; - H = L - H; - auto sG = E.power_gen(S); - auto hA = E.power_point(PubKey, H); - auto pR1 = E.add_points(sG, hA); - unsigned char pR1_bytes[32]; - if (!pR1.export_point(pR1_bytes)) { - return false; - } - return !std::memcmp(pR1_bytes, signature, 32); -} - -class PrivateKey { - public: - struct priv_key_no_copy {}; - PrivateKey() : inited(false) { - std::memset(privkey, 0, privkey_bytes); - } - PrivateKey(const unsigned char pk[privkey_bytes]) : inited(false) { - std::memset(privkey, 0, privkey_bytes); - import_private_key(pk); - } - ~PrivateKey() { - clear(); - } - bool random_private_key(bool strong = false); - bool import_private_key(const unsigned char pk[privkey_bytes]); - bool export_private_key(unsigned char pk[privkey_bytes]) const; // careful! 
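For reference, check_message_signature above is the standard Ed25519 verification test [S]B = R + [H]A with H = SHA-512(R || A || M) mod L; it avoids point negation by computing [S]B + [(L - H) mod L]A and comparing the 32-byte encoding of the result with R taken from the first half of the signature. A compact restatement reusing this file's types (the helper name ed25519_verify_sketch is hypothetical):

#include <cstring>

bool ed25519_verify_sketch(const ellcurve::TwEdwardsCurve::SegrePoint& A,  // decoded public key point
                           const arith::Bignum& S,                         // second half of the signature
                           const arith::Bignum& H,                         // SHA-512(R||A||M) already reduced mod L
                           const unsigned char R_bytes[32]) {
  auto& E = ellcurve::Ed25519();
  const arith::Bignum& L = E.get_ell();
  // [S]B + [L-H]A equals [S]B - [H]A, so this is the usual [S]B == R + [H]A check.
  auto lhs = E.add_points(E.power_gen(S), E.power_point(A, L - H));
  unsigned char lhs_bytes[32];
  return lhs.export_point(lhs_bytes) && !std::memcmp(lhs_bytes, R_bytes, 32);
}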
- bool export_public_key(unsigned char pubk[pubkey_bytes]) const { - return PubKey.export_public_key(pubk); - } - void clear(); - bool ok() const { - return inited; - } - - // used for EdDSA (sign) - bool sign_message(unsigned char signature[sign_bytes], const unsigned char* message, std::size_t msg_size); - // used for ECDH (encrypt / decrypt) - bool compute_shared_secret(unsigned char secret[shared_secret_bytes], const PublicKey& Pub); - // used for EC asymmetric decryption - bool compute_temp_shared_secret(unsigned char secret[shared_secret_bytes], - const unsigned char temp_pub_key[pubkey_bytes]); - - const PublicKey& get_public_key() const { - return PubKey; - } - - private: - bool inited; - unsigned char privkey[privkey_bytes]; - unsigned char priv_salt[32]; - arith::Bignum priv_exp; - PublicKey PubKey; - - bool process_private_key(); - PrivateKey(const PrivateKey&) { - throw priv_key_no_copy(); - } - PrivateKey& operator=(const PrivateKey&) { - throw priv_key_no_copy(); - } -}; - -bool PrivateKey::random_private_key(bool strong) { - inited = false; - if (!prng::rand_gen().rand_bytes(privkey, privkey_bytes, strong)) { - clear(); - return false; - } - return process_private_key(); -} - -void PrivateKey::clear(void) { - std::memset(privkey, 0, privkey_bytes); - std::memset(priv_salt, 0, sizeof(priv_salt)); - priv_exp.clear(); - PubKey.clear(); - inited = false; -} - -bool PrivateKey::import_private_key(const unsigned char pk[privkey_bytes]) { - clear(); - if (all_bytes_same(pk, privkey_bytes)) { - return false; - } - std::memcpy(privkey, pk, privkey_bytes); - return process_private_key(); -} - -bool PrivateKey::export_private_key(unsigned char pk[privkey_bytes]) const { // careful! - if (!inited) { - std::memset(pk, 0, privkey_bytes); - return false; - } else { - std::memcpy(pk, privkey, privkey_bytes); - return true; - } -} - -bool PrivateKey::process_private_key() { - unsigned char buff[64]; - digest::hash_str(buff, privkey, privkey_bytes); - std::memcpy(priv_salt, buff + 32, 32); - buff[0] &= -8; - buff[31] = ((buff[31] | 0x40) & ~0x80); - priv_exp.import_lsb(buff, 32); - PubKey = ellcurve::Ed25519().power_gen(priv_exp); - inited = PubKey.ok(); - if (!inited) { - clear(); - } - return inited; -} - -bool PrivateKey::compute_shared_secret(unsigned char secret[shared_secret_bytes], const PublicKey& Pub) { - if (!inited || !Pub.ok()) { - std::memset(secret, 0, shared_secret_bytes); - *(long*)secret = lrand48(); - return false; - } - auto P = ellcurve::Curve25519().power_xz(Pub.get_point_xz(), priv_exp); - if (P.is_infty()) { - std::memset(secret, 0, shared_secret_bytes); - *(long*)secret = lrand48(); - return false; - } - P.export_point_y(secret); - return true; -} - -bool PrivateKey::compute_temp_shared_secret(unsigned char secret[shared_secret_bytes], - const unsigned char temp_pub_key[pubkey_bytes]) { - PublicKey tempPubkey(temp_pub_key); - if (!tempPubkey.ok()) { - return false; - } - return compute_shared_secret(secret, tempPubkey); -} - -bool PrivateKey::sign_message(unsigned char signature[sign_bytes], const unsigned char* message, std::size_t msg_size) { - if (!inited) { - std::memset(signature, 0, sign_bytes); - return false; - } - unsigned char r_bytes[64]; - digest::hash_two_str(r_bytes, priv_salt, 32, message, msg_size); - const arith::Bignum& L = ellcurve::Ed25519().get_ell(); - arith::Bignum eR; - eR.import_lsb(r_bytes, 64); - eR %= L; - - auto pR = ellcurve::Ed25519().power_gen(eR); - - assert(pR.export_point(signature, 32)); - { - digest::SHA512 hasher(signature, 32); - 
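process_private_key above derives the signing scalar as specified in RFC 8032: the 32-byte private key is hashed with SHA-512, the lower half is clamped into priv_exp, and the upper half becomes priv_salt, the deterministic-nonce prefix that sign_message hashes together with the message. The clamping written there as buff[0] &= -8 and (buff[31] | 0x40) & ~0x80 is equivalent to this standalone helper (the name is illustrative):

// Clear the three lowest bits (cofactor 8), clear bit 255, set bit 254
// of the 32-byte little-endian scalar.
void clamp_ed25519_scalar(unsigned char s[32]) {
  s[0] &= 0xF8;                   // same effect as s[0] &= -8 on an unsigned char
  s[31] = (s[31] | 0x40) & 0x7F;  // same as (s[31] | 0x40) & ~0x80
}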
hasher.feed(PubKey.get_pubkey_ptr(), 32); - hasher.feed(message, msg_size); - hasher.extract(r_bytes); - } - arith::Bignum S; - S.import_lsb(r_bytes, 64); - S %= L; - S *= priv_exp; - S += eR; - S %= L; - S.export_lsb(signature + 32, 32); - return true; -} - -// use one TempKeyGenerator object a lot of times -class TempKeyGenerator { - enum { salt_size = 64 }; - unsigned char random_salt[salt_size]; - unsigned char buffer[privkey_bytes]; - - public: - TempKeyGenerator() { - prng::rand_gen().strong_rand_bytes(random_salt, salt_size); - } - ~TempKeyGenerator() { - std::memset(random_salt, 0, salt_size); - std::memset(buffer, 0, privkey_bytes); - } - - unsigned char* get_temp_private_key(unsigned char* to, const unsigned char* message, std::size_t size, - const unsigned char* rand = 0, std::size_t rand_size = 0); // rand may be 0 - void create_temp_private_key(PrivateKey& pk, const unsigned char* message, std::size_t size, - const unsigned char* rand = 0, std::size_t rand_size = 0); - - // sets temp_pub_key and shared_secret for one-time asymmetric encryption of message - bool create_temp_shared_secret(unsigned char temp_pub_key[pubkey_bytes], unsigned char secret[shared_secret_bytes], - const PublicKey& recipientPubKey, const unsigned char* message, std::size_t size, - const unsigned char* rand = 0, std::size_t rand_size = 0); -}; - -unsigned char* TempKeyGenerator::get_temp_private_key(unsigned char* to, const unsigned char* message, std::size_t size, - const unsigned char* rand, - std::size_t rand_size) { // rand may be 0 - digest::SHA256 hasher(message, size); - hasher.feed(random_salt, salt_size); - if (rand && rand_size) { - hasher.feed(rand, rand_size); - } - if (!to) { - to = buffer; - } - hasher.extract(to); - //++ *((long *)random_salt); - return to; -} - -void TempKeyGenerator::create_temp_private_key(PrivateKey& pk, const unsigned char* message, std::size_t size, - const unsigned char* rand, std::size_t rand_size) { - pk.import_private_key(get_temp_private_key(buffer, message, size, rand, rand_size)); - std::memset(buffer, 0, privkey_bytes); -} - -bool TempKeyGenerator::create_temp_shared_secret(unsigned char temp_pub_key[pubkey_bytes], - unsigned char shared_secret[shared_secret_bytes], - const PublicKey& recipientPubKey, const unsigned char* message, - std::size_t size, const unsigned char* rand, std::size_t rand_size) { - PrivateKey tmpPk; - create_temp_private_key(tmpPk, message, size, rand, rand_size); - return tmpPk.export_public_key(temp_pub_key) && tmpPk.compute_shared_secret(shared_secret, recipientPubKey); -} - -} // namespace Ed25519 -} // namespace crypto - -// ****************************************************** - -void print_buffer(const unsigned char buffer[32]) { - for (int i = 0; i < 32; i++) { - char buff[4]; - sprintf(buff, "%02x", buffer[i]); - std::cout << buff; - } -} - -std::string buffer_to_hex(const unsigned char* buffer, std::size_t size = 32) { - char str[2 * size + 1]; - for (std::size_t i = 0; i < size; i++) { - sprintf(str + 2 * i, "%02x", buffer[i]); - } - return str; -} - -int main(void) { - arith::Bignum x = (3506824292LL << 31); - x = (2948877059LL << 31); - arith::Bignum L = (((36 * x + 36) * x + 18) * x + 6) * x + 1; - arith::Bignum P = L + 6 * sqr(x); - std::cout << "x= " << x << "; l= " << L << "; p= " << P << std::endl; - std::cout << "x= " << x.to_hex() << "; l= " << L.to_hex() << "; p= " << P.to_hex() << std::endl; - std::cout << "x mod 3=" << x % 3 << "; p mod 9=" << P % 9 << "; x/2^31=" << (x >> 31).to_hex() << "=" << (x >> 31) - << 
std::endl; - - crypto::Ed25519::PrivateKey PK1, PK2, PK3; - PK1.random_private_key(); - PK2.random_private_key(); - unsigned char priv2_export[32]; - bool ok = PK2.export_private_key(priv2_export); - std::cout << "PK2 = " << ok << " " << buffer_to_hex(priv2_export) << std::endl; - PK3.import_private_key(priv2_export); - std::cout << "PK3 = " << PK3.ok() << std::endl; - - unsigned char pub_export[32]; - ok = PK1.export_public_key(pub_export); - std::cout << "PubK1 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - crypto::Ed25519::PublicKey PubK1(pub_export); - ok = PK2.export_public_key(pub_export); - std::cout << "PubK2 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - crypto::Ed25519::PublicKey PubK2(pub_export); - ok = PK3.export_public_key(pub_export); - std::cout << "PubK3 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - crypto::Ed25519::PublicKey PubK3(pub_export); - ok = PubK1.export_public_key(pub_export); - std::cout << "PubK1 = " << ok << " " << buffer_to_hex(pub_export) << std::endl; - - unsigned char secret12[32], secret21[32]; - ok = PK1.compute_shared_secret(secret12, PK3.get_public_key()); - std::cout << "secret(PK1,PubK2)=" << ok << " " << buffer_to_hex(secret12) << std::endl; - ok = PK2.compute_shared_secret(secret21, PubK1); - std::cout << "secret(PK2,PubK1)=" << ok << " " << buffer_to_hex(secret21) << std::endl; - - unsigned char signature[64]; - ok = PK1.sign_message(signature, (const unsigned char*)"abc", 3); - std::cout << "PK1.signature=" << ok << " " << buffer_to_hex(signature) << std::endl; - - // signature[63] ^= 1; - ok = PubK1.check_message_signature(signature, (const unsigned char*)"abc", 3); - std::cout << "PubK1.check_signature=" << ok << std::endl; - - unsigned char temp_pubkey[32]; - crypto::Ed25519::TempKeyGenerator TKG; // use one generator a lot of times - - TKG.create_temp_shared_secret(temp_pubkey, secret12, PubK1, (const unsigned char*)"abc", 3); - std::cout << "secret12=" << buffer_to_hex(secret12) << "; temp_pubkey=" << buffer_to_hex(temp_pubkey) << std::endl; - - PK1.compute_temp_shared_secret(secret21, temp_pubkey); - std::cout << "secret21=" << buffer_to_hex(secret21) << std::endl; -} diff --git a/test/regression-tests.ans b/test/regression-tests.ans index e2057210..ec013bf7 100644 --- a/test/regression-tests.ans +++ b/test/regression-tests.ans @@ -13,11 +13,12 @@ Test_Fift_test_bls_ops_default fb0a81d4f247ab806318b051d12158f7f1aacc5513db5f8cb Test_Fift_test_deep_stack_ops_default df812efbadfffa8a3f553416f68c8c4435bac07266f84562cf98fe5f0dd62a52 Test_Fift_test_default 4e44b3382963ec89f7b5c8f2ebd85da3bc8aebad5b49f5b11b14075061477b4d Test_Fift_test_dict_default a9c8cbcfdece5573185022cea07f59f1bc404e5d879e5157a5745757f8ee0525 -Test_Fift_test_disasm_default dacaa555f5f217b2373e01e3bcd59634e480f5759dcc43edbdef35273ae38492 +Test_Fift_test_disasm_default 412cf37d37c5d9d81f44dbf4e3d3e7cda173c23b890614eb8a3bc5f2b92f13e6 Test_Fift_test_fiftext_default 2b0db5d4d4bfbc705b959cc787540d7b3a21a71469eac54756e76953f0d9afca Test_Fift_test_fixed_default 278a19d56b773102caf5c1fe2997ea6c8d0d9e720eff8503feede6398a197eec Test_Fift_test_hash_ext_default 686fc5680feca5b3bb207768215b27f6872a95128762dee0d7f2c88bc492d62d Test_Fift_test_hmap_default c269246882039824bb5822e896c3e6e82ef8e1251b6b251f5af8ea9fb8d05067 +Test_Fift_test_levels_default 9fba4a7c98aec9000f42846d6e5fd820343ba61d68f9139dd16c88ccda757cf3 Test_Fift_test_namespaces_default e6419619c51332fb5e8bf22043ef415db686c47fe24f03061e5ad831014e7c6c Test_Fift_test_rist255_default 
f4d7558f200a656934f986145c19b1dedbe2ad029292a5a975576d6891e25fc4 Test_Fift_test_sort2_default 9b57d47e6a10e7d1bbb565db35400debf2f963031f434742a702ec76555a5d3a @@ -37,40 +38,40 @@ Test_Fift_testvm8_default 17c9e2205ccecfd8549328b4a501d07dde0336899a7a496e747e10 Test_Fift_testvm_default ee4cbfec76c050b6de7877cfc39817d594cd1e175b6265b76fb642e30b940437 Test_Fift_testvmprog_default e5d0b2c68ee568280877c8495be558bfd0054ca5d99a99eebb525bbeca8a65af Test_RefInt_main_default 768493e0aef8e09a401a6d369edd1ef503a9215fb09dc460f52b27a8bde767cb -Test_VM_assert_code_not_null_default 05bc07e129181c972b976442f200de9487dee8bfb5ac53dd36ff61c5d4d4291d -Test_VM_assert_extract_minmax_key_default c352309c61bdf62ba7a0ba7280d303c88b0696fe7efa550c05feb2c662275297 -Test_VM_assert_lookup_prefix_default c5b45999b46d324e4008c07e5ce671bbcd833f4e15fb21a4a5136f7b980ca6fc -Test_VM_assert_pfx_dict_lookup_default fa6e3f96b31cf2ed9a9dac6b279ec05acfedf13b8ed7b815789f167d1ed7352f +Test_VM_assert_code_not_null_default 09f75cb845e0df27f3ec92405ccb4018484711a79813fd47fe8e158762c1cb93 +Test_VM_assert_extract_minmax_key_default 756337c2b2ce489243956a6608d6934ba9f76124a9435f045fc3a3b65c113d41 +Test_VM_assert_lookup_prefix_default f7683f9d2010bca53b1ef20c0e82427fb04ed62fa5fea1ee986f005ecfc9a27a +Test_VM_assert_pfx_dict_lookup_default 6d7c80d94dbc6d3ae4bafa216667b95ede4f2cbd44a59384abace84270417ef8 Test_VM_bigint_default feeb473a4ac51133989e1c145d0f49defa77117d2ae8b66bd7d12e3579e91b9f -Test_VM_bug_div_short_any_default f69aca6873f75d53dd37b6952151a2d858407a04589330762827dbc96d8b7c04 -Test_VM_bug_exec_dict_getnear_default db314c2e25b49c1f7f044d271e225f36da546c66242a8ab12f6afae37628a81e -Test_VM_bug_stack_overflow_default 7e0e3e96ca438ac96648d569c55213aa82154cf004e80265b1c481b1c4219719 -Test_VM_infinity_loop_1_default 670beda76229922806805e558d50d8f320017c642c3e7e34a7e1f2b7edb83cee -Test_VM_infinity_loop_2_default 22d9bd8cb41ff7b6cced5825e4ab73275b2fc07b1e3cd4588de815e2e6df2963 -Test_VM_memory_leak_default e10dc118f3538720a16bcbd39be9a68c3ea07f76b3d2ed5719a5e866d91f0ab3 -Test_VM_memory_leak_new_default fd2eec0a1d5ae49fb5ff8ba4b938fd9d0fe330be4a07b2b8be12bab249b00d90 -Test_VM_memory_leak_old_default f3076ae38d14000c021597b824d2f0e51de4f00601429ec3e23cca1b32dba844 -Test_VM_oom_1_default 90862ddf3270840fbc9263c003c628ddd4a8bf6548b9bd3d53eb35a5c34bc325 -Test_VM_report3_1_default 7bc6a8e66f9a0e40cd131e9829ff36fed16b464170d27c0b365a3f549df57282 -Test_VM_report3_2_default 2231bc352cf431e72a84abad2261969bd5b0ee3d9051bb7a53b69fd8ea05f951 -Test_VM_report3_3_default 9416187eb0600ed247795837ca820bccaffb39841bd9d2ff625816bfbba35d6d -Test_VM_report3_4_default 11661eb00ea37c68e3483a8e048f922f73070c6da8219247663e3d6471c5c0cc -Test_VM_report3_6_default 1d7be98a8b07f803e80168247459e620ce4b91df634ad896e878d21a3ed757c0 -Test_VM_report3_int_overflow_1_default a0c2414ca2c9672d54409ee375a6aad6e2233306eaa3dfd33a82de3c90bba3ba -Test_VM_report3_int_overflow_2_default 01cd461802e532a6830705ad50eaa1760278737ff7beeb654e3c59ceb4aa8e2e -Test_VM_report3_loop_1_default b28b35d057a1b4fa2282d6f422ecd822b18cc4344733d923ef7b002f64bc4d72 -Test_VM_report3_loop_2_default 9f8236535902b04e403d412fcf1f79e64d0f2eb25b3cc014b7d61b2d7a34b9ef -Test_VM_report3_loop_3_default 7ee05ea553c48a2476035817b9d860f614a355927c9e011b2f824dc6e5f7b0cf -Test_VM_report3_loop_4_default 4b6c2f65fda3c9e9c6660b6cbbcb1b2103c5b52870cb5daa8876bbed0ca9bbc9 -Test_VM_report3_loop_5_default 0d5d504884172ef8513757d7f6b2a3870dbd28efd5960857441c032e1c67d836 -Test_VM_report3_loop_6_default 
5c35b92144debdb61b2020d690669bffbdd96f75ecde827fd0c75c05da22b5a0 -Test_VM_report3_qnot_default dc280444c7e3886cc3412f96f44c803c45287a07fcb9c638643e21bcdfe3905d -Test_VM_simple_default f6733549069427c2beb1a85ee25635540e27aa68cb8ad101d8435e19afeae862 -Test_VM_unhandled_exception_1_default 0abe2740dd3b6a6b91eb67fee573f638086fecc72653d2d81c956782186b5d78 -Test_VM_unhandled_exception_2_default 5ca67db5a0e957cc106bb47b744177ca959632a352f3629df376c34cbf03d51b -Test_VM_unhandled_exception_3_default b354e897e35a1177fd66d2c6ad7d77ae33a4e18f8678a518d79fea1388853307 -Test_VM_unhandled_exception_4_default 412cbfe13745fde55cdcc5e41d7b15ba4d09f0e723f8e4421ae0b7066ca07b8f -Test_VM_unhandled_exception_5_default d760e540cd9c200c207f71c540cdaf06d11c96e32ec19860b9c1046cb1e38855 +Test_VM_bug_div_short_any_default 49c9588b2b25b08979016f8b7ca42ae9fa4904a1dc6a2093a7dae6dce0cdf42f +Test_VM_bug_exec_dict_getnear_default 0b0cb6c1fef773f8b5a4aab8d575ba941f3b85dd449f85051296954028e59781 +Test_VM_bug_stack_overflow_default 31950eb2ed62bd1ce1c18e0031a81390ff3a3feee61ff23a09181995917137d0 +Test_VM_infinity_loop_1_default 6b8cc0ff85efa6882ffdf6e9e4333967976a29c4ce32a25b42c4c53370ad3024 +Test_VM_infinity_loop_2_default 4be08957dc86dfde3dfadd8c2f961ef2f1fa839788bbf7affea754115cee9e18 +Test_VM_memory_leak_default da588f89f3bc3ef7496bbab61e2d993f0c84bba80bb28d9c20c6eac0f7f57dd3 +Test_VM_memory_leak_new_default d25e8602c88c454ded6271d0f7afbc556820cc9942c56de9e0bd95b329f8783e +Test_VM_memory_leak_old_default 563f5a02130f231823099985c77d09913db07d2d8782edf431822f6afe4911b8 +Test_VM_oom_1_default 354934173c82e4f7bca5063846abd35cb47c4fcf1c3ba8c2fd04a4b186fcbf18 +Test_VM_report3_1_default 26bb43b5100e94791911a66226ec6545422749e0ea9e6279983b00ef4b506601 +Test_VM_report3_2_default 07a84726217f362fd71b3ceab96112ffc7aa40ed44636cf0128205d85798c66c +Test_VM_report3_3_default 0d9a92491c88ada92283691debd123724db2b7c1bd345200a53c057032e9cc07 +Test_VM_report3_4_default eb23c8e1219aed91b1b4f519efcac87018a5cf8e0ce473cfa641f8221f3c5d20 +Test_VM_report3_6_default 769ce8f9bb6fb9b8619afdb8e9d621b6199466f07c37eeea8edf3c21bf05a101 +Test_VM_report3_int_overflow_1_default 7aaf32ec7ace54b93d6b55f3ac9642572f348ebd64412afdda24849f8e4eb1dc +Test_VM_report3_int_overflow_2_default 572d197681654c94951280448ea3cf448613391633383c2424d719b03b6ec939 +Test_VM_report3_loop_1_default c9b00b32a024c65e0a8019c86e94ee365a823ff26e2420e1797902841abab57c +Test_VM_report3_loop_2_default 3654949987ddb44d8e11e84fe907d43707eaed910b9d0ad15dd68b531df1444a +Test_VM_report3_loop_3_default c1fc7e0d160b334fe8a4735a2a9d36c3b10530edaad5c1859df88382ff82a2d9 +Test_VM_report3_loop_4_default 5ad7cc51a6e553ee3d4a427229908a51692e117624838190311c7023df7a5e5b +Test_VM_report3_loop_5_default 068f81caecc344132a601259d9f73eea7089b1399793661ba1954483e0d5682c +Test_VM_report3_loop_6_default bf2e45709fceeed0192ec34af618cba3b85b90f71071e018afba686167618a90 +Test_VM_report3_qnot_default 7fcbda7e3fc4853a36e6b02e9d346f039690b1879d40850f561ea4123452d3ec +Test_VM_simple_default c96d70ea853828c89cd38fcf22543289335f3086a53301a1d0f186753ba9975b +Test_VM_unhandled_exception_1_default 80fe0e4c2ae19ae73e67e4355548d0afa59ea286be2d75a91db4529618dba008 +Test_VM_unhandled_exception_2_default 1362ba3a6ddbf5a30aba07ad58e8c24b0f85bdc53525e3eaa27af7248c62525a +Test_VM_unhandled_exception_3_default e381ce751cbd0e2994d7f60df7746b9ed7783198cfbcb31dccf02fafe68b6733 +Test_VM_unhandled_exception_4_default 51dd501ec0514f3b388145761b252f09d6ef3e831ea450605ae30511688dd708 +Test_VM_unhandled_exception_5_default 
8231cfe1fb6ce6107b592f2c8f6a4eae0d123fc399163c81e8e0d5228b68bc91 Test_base64_main_default e90d541bd810871c4a81e162f1fffb555024b72807cb895414d16bc11494b789 Test_bigexp_main_default 45a1f51fb2abcc1ebf8569e1a57bebee04c334a15e03535ff5869bc9a9db8956 Test_bits256_scan_main_default 3ec7434e1cabc8e08eb2e79064e67747ffbfed177473c7873b88c144a7ed6f42 diff --git a/test/test-adnl.cpp b/test/test-adnl.cpp index d9ae4abe..85e965a4 100644 --- a/test/test-adnl.cpp +++ b/test/test-adnl.cpp @@ -52,7 +52,7 @@ int main() { td::to_integer_safe("0").ensure(); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-adnl"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-catchain.cpp b/test/test-catchain.cpp index 53b32924..3131c2b9 100644 --- a/test/test-catchain.cpp +++ b/test/test-catchain.cpp @@ -186,6 +186,7 @@ class CatChainInst : public td::actor::Actor { void create_fork() { auto height = height_ - 1; //td::Random::fast(0, height_ - 1); + LOG(WARNING) << "Creating fork, source_id=" << idx_ << ", height=" << height; auto sum = prev_values_[height] + 1; td::uint64 x[2]; @@ -219,7 +220,7 @@ int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_INFO); td::set_default_failure_signal_handler().ensure(); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-catchain"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); @@ -241,7 +242,8 @@ int main(int argc, char *argv[]) { td::actor::send_closure(adnl, &ton::adnl::Adnl::register_network_manager, network_manager.get()); }); - for (td::uint32 att = 0; att < 10; att++) { + for (td::uint32 att = 0; att < 20; att++) { + LOG(WARNING) << "Test #" << att; nodes.resize(total_nodes); scheduler.run_in_context([&] { @@ -274,8 +276,6 @@ int main(int argc, char *argv[]) { } }); - auto t = td::Timestamp::in(1.0); - ton::catchain::CatChainSessionId unique_id; td::Random::secure_bytes(unique_id.as_slice()); @@ -287,7 +287,7 @@ int main(int argc, char *argv[]) { } }); - t = td::Timestamp::in(10.0); + auto t = td::Timestamp::in(10.0); while (scheduler.run(1)) { if (t.is_in_past()) { break; @@ -298,9 +298,12 @@ int main(int argc, char *argv[]) { std::cout << "value=" << n.get_actor_unsafe().value() << std::endl; } - scheduler.run_in_context([&] { td::actor::send_closure(inst[0], &CatChainInst::create_fork); }); + td::uint32 fork_cnt = att < 10 ? 1 : (att - 10) / 5 + 2; + for (td::uint32 idx = 0; idx < fork_cnt; ++idx) { + scheduler.run_in_context([&] { td::actor::send_closure(inst[idx], &CatChainInst::create_fork); }); + } - t = td::Timestamp::in(10.0); + t = td::Timestamp::in(1.0); while (scheduler.run(1)) { if (t.is_in_past()) { break; diff --git a/test/test-dht.cpp b/test/test-dht.cpp index 2391fd9c..8d814f6e 100644 --- a/test/test-dht.cpp +++ b/test/test-dht.cpp @@ -41,7 +41,7 @@ int main() { SET_VERBOSITY_LEVEL(verbosity_INFO); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-dht"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-ext-client.cpp b/test/test-ext-client.cpp deleted file mode 100644 index a1187d78..00000000 --- a/test/test-ext-client.cpp +++ /dev/null @@ -1,220 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. 
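Back in the test-catchain.cpp hunk above, the new fork schedule grows with the attempt number: attempts 0-9 create one fork, 10-14 create two, and 15-19 create three. A minimal check of that expression (the helper name and the use of std::uint32_t instead of td::uint32 are illustrative):

#include <cassert>
#include <cstdint>

std::uint32_t fork_cnt(std::uint32_t att) {
  return att < 10 ? 1 : (att - 10) / 5 + 2;  // expression copied from test-catchain.cpp
}

int main() {
  assert(fork_cnt(0) == 1 && fork_cnt(9) == 1);
  assert(fork_cnt(10) == 2 && fork_cnt(14) == 2);
  assert(fork_cnt(15) == 3 && fork_cnt(19) == 3);
  return 0;
}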
- - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "adnl/adnl-ext-client.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class TestNode : public td::actor::Actor { - private: - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - td::actor::ActorOwn client_; - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::AdnlExtClient::Callback { - public: - void on_ready() override { - td::actor::send_closure(id_, &TestNode::conn_ready); - } - void on_stop_ready() override { - td::actor::send_closure(id_, &TestNode::conn_closed); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - bool ready_ = false; - std::string db_root_; - - public: - void conn_ready() { - LOG(ERROR) << "conn ready"; - ready_ = true; - } - void conn_closed() { - ready_ = false; - } - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void set_db_root(std::string db_root) { - db_root_ = db_root; - } - void start_up() override { - } - void alarm() override { - if (ready_ && !client_.empty()) { - LOG(ERROR) << "sending query"; - auto P = td::PromiseCreator::lambda([](td::Result R) { - if (R.is_error()) { - LOG(ERROR) << "failed query: " << R.move_as_error(); - return; - } - auto F = ton::fetch_tl_object(R.move_as_ok(), true); - if (F.is_error()) { - LOG(ERROR) << "failed to pasrse answer: " << F.move_as_error(); - return; - } - auto obj = F.move_as_ok(); - LOG(ERROR) << "got answer: " << ton::ton_api::to_string(obj); - }); - td::BufferSlice b = ton::serialize_tl_object(ton::create_tl_object(), true); - td::actor::send_closure(client_, &ton::adnl::AdnlExtClient::send_query, "query", std::move(b), - td::Timestamp::in(10.0), std::move(P)); - } - alarm_timestamp() = td::Timestamp::in(2.0); - } - 
TestNode() { - } - void run() { - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - CHECK(gc.liteclients_.size() > 0); - auto &cli = gc.liteclients_[0]; - td::IPAddress addr; - addr.init_host_port(td::IPAddress::ipv4_to_str(cli->ip_), cli->port_).ensure(); - - client_ = ton::adnl::AdnlExtClient::create(ton::adnl::AdnlNodeIdFull::create(cli->id_).move_as_ok(), addr, - make_callback()); - alarm_timestamp() = td::Timestamp::in(2.0); - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_DEBUG); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('D', "db", "root for dbs", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_db_root, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-ext-server.cpp b/test/test-ext-server.cpp deleted file mode 100644 index b4b78728..00000000 --- a/test/test-ext-server.cpp +++ /dev/null @@ -1,221 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. 
- - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. - - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class TestNode : public td::actor::Actor { - private: - td::actor::ActorOwn keyring_; - td::actor::ActorOwn adnl_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::Adnl::Callback { - public: - void receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, - td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::adnl_receive_message, src, dst, std::move(data)); - } - void receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::adnl_receive_query, src, dst, std::move(data), std::move(promise)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - std::string db_root_; - - public: - void adnl_receive_message(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data) { - LOG(ERROR) << "ADNL MESSAGE FROM " << src << ": size=" << data.size() << "\n"; - } - - void adnl_receive_query(ton::adnl::AdnlNodeIdShort src, ton::adnl::AdnlNodeIdShort dst, td::BufferSlice data, - td::Promise promise) { - LOG(ERROR) << "ADNL QUERY FROM " << src << ": size=" << data.size() << "\n"; - promise.set_value(ton::serialize_tl_object(ton::create_tl_object("xxx"), true)); - } - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void set_db_root(std::string db_root) { - db_root_ = db_root; - } - void start_up() override { - } - void alarm() override { - } - TestNode() { - } - void run() { - keyring_ = ton::keyring::Keyring::create(db_root_ + "/keyring/"); - 
adnl_ = ton::adnl::Adnl::create(db_root_, keyring_.get()); - - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - //td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - for (auto &x : lc.liteservers_) { - auto pk = ton::PrivateKey{x->id_}; - auto pub_k = ton::adnl::AdnlNodeIdFull{pk.compute_public_key()}; - auto id = pub_k.compute_short_id(); - - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, pub_k, ton::adnl::AdnlAddressList{}); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::subscribe, id, - ton::adnl::Adnl::int_to_bytestring(ton::ton_api::getTestObject::ID), make_callback()); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ext_local_id, id); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ext_tcp_port, static_cast(x->port_)); - } - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_DEBUG); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('D', "db", "root for dbs", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_db_root, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - 
dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-node.cpp b/test/test-node.cpp deleted file mode 100644 index d771d3ae..00000000 --- a/test/test-node.cpp +++ /dev/null @@ -1,376 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. 
- - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "catchain/catchain.h" - -#include "crypto/common/refvector.hpp" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -template -std::ostream &operator<<(std::ostream &stream, const td::UInt &x) { - for (size_t i = 0; i < size / 8; i++) { - stream << td::format::hex_digit((x.raw[i] >> 4) & 15) << td::format::hex_digit(x.raw[i] & 15); - } - - return stream; -} - -class TestNode : public td::actor::Actor { - private: - std::vector ping_ids_; - td::Timestamp next_dht_dump_; - - td::actor::ActorOwn adnl_; - std::vector> dht_nodes_; - td::actor::ActorOwn overlay_manager_; - std::vector> overlays_; - std::vector> catchains_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - td::int32 broadcast_size_ = 100; - - void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) { - LOG(ERROR) << "MESSAGE FROM " << src << " to " << dst << " of size " << std::to_string(data.size()) << "\n"; - } - - void receive_broadcast(td::UInt256 overlay_id, td::BufferSlice data) { - LOG(ERROR) << "BROADCAST IN " << overlay_id << " hash=" << td::sha256(data.as_slice()) << "\n"; - } - - void receive_query(td::UInt256 src, td::UInt256 dst, td::BufferSlice data, td::Promise promise) { - auto Q = ton::fetch_tl_object(std::move(data), true); - CHECK(Q.is_ok()); - auto R = Q.move_as_ok(); - LOG(ERROR) << "QUERY " - << " FROM " << src << " to " << dst << ": " << ton::ton_api::to_string(R) << "\n"; - promise.set_value(serialize_tl_object(ton::create_tl_object(), true)); - } - - void catchain_new_block(td::UInt256 src, td::uint64 height, td::BufferSlice data) { - LOG(ERROR) << "CATCHAIN BLOCK: " << src << "@" << height << ": " << td::sha256_uint256(data.as_slice()) << "\n"; - } - void catchain_bad_block(td::UInt256 src) { - LOG(ERROR) << "CATCHAIN BAD BLOCK\n"; - } - void catchain_broadcast(td::BufferSlice data) { - LOG(ERROR) << "CATCHAIN BROADCAST " << td::sha256_uint256(data.as_slice()) << "\n"; - } - - std::unique_ptr make_callback() { - class Callback : public ton::adnl::Adnl::Callback { - public: - void receive_message(td::UInt256 src, td::UInt256 dst, td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::receive_message, src, dst, std::move(data)); - } - void receive_query(td::UInt256 src, td::UInt256 dst, td::BufferSlice data, - td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::receive_query, src, dst, std::move(data), std::move(promise)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - std::unique_ptr make_catchain_callback() { - class Callback : public ton::CatChainActor::Callback { - public: - void new_block(td::UInt256 src, td::uint64 height, td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::catchain_new_block, src, height, std::move(data)); - } - void bad_block(td::UInt256 src) override { - td::actor::send_closure(id_, &TestNode::catchain_bad_block, src); - } - void broadcast(td::BufferSlice data) override { - 
td::actor::send_closure(id_, &TestNode::catchain_broadcast, std::move(data)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - std::unique_ptr make_overlay_callback() { - class Callback : public ton::overlay::Overlays::Callback { - public: - void receive_message(td::UInt256 src, td::UInt256 overlay_id, td::BufferSlice data) override { - } - void receive_query(td::UInt256 src, td::uint64 query_id, td::UInt256 overlay_id, td::BufferSlice data) override { - } - - void receive_broadcast(td::UInt256 overlay_id, td::BufferSlice data) override { - td::actor::send_closure(id_, &TestNode::receive_broadcast, overlay_id, std::move(data)); - } - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - public: - void set_broadcast_size(td::int32 size) { - broadcast_size_ = size; - } - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void start_up() override { - alarm_timestamp() = td::Timestamp::in(1); - } - void alarm() override { - /*if (overlays_.size() > 0 && broadcast_size_ > 0) { - td::BufferSlice s(broadcast_size_); - td::Random::secure_bytes(s.as_slice()); - - td::actor::send_closure(overlay_manager_, &ton::overlay::OverlayManager::send_broadcast_fer, overlays_[0].first, - overlays_[0].second, ton::create_tl_object(s.as_slice().str())); - }*/ - for (auto &chain : catchains_) { - td::BufferSlice s(broadcast_size_); - td::Random::secure_bytes(s.as_slice()); - - td::actor::send_closure(chain, &ton::CatChainActor::add_event, std::move(s)); - } - alarm_timestamp() = td::Timestamp::in(1.0); - if (next_dht_dump_.is_in_past()) { - /*for (auto &node : dht_nodes_) { - char b[10240]; - td::StringBuilder sb({b, 10000}); - node->get_actor_unsafe().dump(sb); - LOG(DEBUG) << sb.as_cslice().c_str(); - }*/ - next_dht_dump_ = td::Timestamp::in(60.0); - } - } - TestNode() { - adnl_ = ton::adnl::Adnl::create("/var/ton-work/db.adnl"); - } - void run() { - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - /*if (!lc.net_) { - LOG(FATAL) << "local config does not contain NET section"; - }*/ - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - - for (auto &it : lc.dht_) { - if (it->get_id() == ton::ton_api::dht_config_local::ID) { - auto R = ton::dht::Dht::create_from_json( - ton::clone_tl_object(gc.dht_), ton::move_tl_object_as(it), adnl_.get()); - if (R.is_error()) { - LOG(FATAL) << "fail creating dht node: " << 
R.move_as_error(); - } - dht_nodes_.push_back(R.move_as_ok()); - } else { - auto I = ton::move_tl_object_as(it); - for (int i = 0; i < I->cnt_; i++) { - auto R = ton::dht::Dht::create_random(ton::clone_tl_object(gc.dht_), ton::clone_tl_object(I->addr_list_), - adnl_.get()); - if (R.is_error()) { - LOG(FATAL) << "fail creating dht node: " << R.move_as_error(); - } - dht_nodes_.push_back(R.move_as_ok()); - } - } - } - - CHECK(dht_nodes_.size() > 0); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_dht_node, dht_nodes_[0].get()); - //td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::register_dht_node, dht_nodes_[0].get()); - - overlay_manager_ = ton::overlay::Overlays::create(adnl_.get(), dht_nodes_[0].get()); - - for (auto &it : lc.public_overlays_) { - if (it->get_id() == ton::ton_api::overlay_config_local::ID) { - auto X = ton::move_tl_object_as(it); - auto id = ton::create_tl_object(X->name_.clone()); - auto Id = ton::move_tl_object_as(id); - auto sid = ton::adnl_short_id(Id); - overlays_.emplace_back(X->id_->id_, sid); - td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::create_public_overlay, X->id_->id_, - std::move(Id), make_overlay_callback()); - } else { - auto X = ton::move_tl_object_as(it); - for (int i = 0; i < X->cnt_; i++) { - auto pk = ton::adnl_generate_random_pk(); - auto local_id = ton::adnl_short_id(ton::get_public_key(pk)); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, std::move(pk), ton::clone_tl_object(X->addr_list_)); - - auto id = ton::create_tl_object(X->name_.clone()); - auto Id = ton::move_tl_object_as(id); - auto sid = ton::adnl_short_id(Id); - overlays_.emplace_back(local_id, sid); - td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::create_public_overlay, local_id, - std::move(Id), make_overlay_callback()); - } - } - } - - //auto C = ton::CatChainActor::create(nullptr, adnl_.get(), overlay_manager_.get(), - // std::vector>()); - - for (auto &it : lc.catchains_) { - auto tag = it->tag_; - for (auto &V : gc.catchains_) { - if (V->tag_ == tag) { - auto v = std::move(clone_tl_object(V)->nodes_); - auto C = ton::CatChainActor::create(make_catchain_callback(), adnl_.get(), overlay_manager_.get(), - std::move(v), it->id_->id_, tag); - catchains_.push_back(std::move(C)); - } - } - } - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_DEBUG); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb({b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('s', "broadcast-size", "size of broadcast", [&](td::Slice fname) { - 
td::actor::send_closure(x, &TestNode::set_broadcast_size, std::atoi(fname.str().c_str())); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({2}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-rldp.cpp b/test/test-rldp.cpp index 11344c55..b07f5f7d 100644 --- a/test/test-rldp.cpp +++ b/test/test-rldp.cpp @@ -40,7 +40,7 @@ int main() { SET_VERBOSITY_LEVEL(verbosity_INFO); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-rldp"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-rldp2.cpp b/test/test-rldp2.cpp index 5367ffd3..646b27d5 100644 --- a/test/test-rldp2.cpp +++ b/test/test-rldp2.cpp @@ -40,7 +40,7 @@ int main() { SET_VERBOSITY_LEVEL(verbosity_INFO); - std::string db_root_ = "tmp-ee"; + std::string db_root_ = "tmp-dir-test-rldp2"; td::rmrf(db_root_).ignore(); td::mkdir(db_root_).ensure(); diff --git a/test/test-ton-collator.cpp b/test/test-ton-collator.cpp index aa4c256b..3e5991ca 100644 --- a/test/test-ton-collator.cpp +++ b/test/test-ton-collator.cpp @@ -50,7 +50,7 @@ #include "validator/fabric.h" #include "validator/impl/collator.h" -#include "crypto/vm/cp0.h" +#include "crypto/vm/vm.h" #include "crypto/block/block-db.h" #include "common/errorlog.h" @@ -412,7 +412,7 @@ int main(int argc, char *argv[]) { SET_VERBOSITY_LEVEL(verbosity_INFO); td::set_default_failure_signal_handler().ensure(); - CHECK(vm::init_op_cp0()); + vm::init_vm().ensure(); td::actor::ActorOwn x; diff --git a/test/test-validator-session-state.cpp b/test/test-validator-session-state.cpp index 02ec4866..a675070c 100644 --- a/test/test-validator-session-state.cpp +++ b/test/test-validator-session-state.cpp @@ -34,6 +34,7 @@ #include "validator-session/validator-session-description.h" #include "validator-session/validator-session-state.h" +#include "validator-session/validator-session-description.hpp" #include #include @@ -48,16 +49,13 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { return 0; } void *alloc(size_t size, size_t align, bool temp) override { - td::uint32 idx = temp ? 1 : 0; - auto s = pdata_cur_[idx].fetch_add(size); - CHECK(s + size <= pdata_size_[idx]); - return static_cast(pdata_[idx] + s); + return (temp ? 
mem_temp_ : mem_perm_).alloc(size, align); } bool is_persistent(const void *ptr) const override { - return ptr == nullptr || (ptr >= pdata_[0] && ptr < pdata_[0] + pdata_size_[0]); + return mem_perm_.contains(ptr); } void clear_temp_memory() override { - pdata_cur_[1] = 0; + mem_temp_.clear(); } ton::PublicKeyHash get_source_id(td::uint32 idx) const override { @@ -192,21 +190,8 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { return opts_; } - ~Description() { - delete[] pdata_[0]; - delete[] pdata_[1]; - } - Description(ton::validatorsession::ValidatorSessionOptions opts, td::uint32 total_nodes) - : opts_(opts), total_nodes_(total_nodes) { - pdata_size_[0] = - static_cast(std::numeric_limits::max() < (1ull << 32) ? 1ull << 30 : 1ull << 33); - pdata_size_[1] = 1 << 22; - pdata_[0] = new td::uint8[pdata_size_[0]]; - pdata_[1] = new td::uint8[pdata_size_[1]]; - pdata_cur_[0] = 0; - pdata_cur_[1] = 0; - + : opts_(opts), total_nodes_(total_nodes), mem_perm_(1 << 30), mem_temp_(1 << 22) { for (auto &el : cache_) { Cached v{nullptr}; el.store(v, std::memory_order_relaxed); @@ -227,9 +212,7 @@ class Description : public ton::validatorsession::ValidatorSessionDescription { }; std::array, cache_size> cache_; - td::uint8 *pdata_[2]; - std::atomic pdata_cur_[2]; - size_t pdata_size_[2]; + ton::validatorsession::ValidatorSessionDescriptionImpl::MemPool mem_perm_, mem_temp_; }; double myrand() { diff --git a/test/test-validator-session.cpp b/test/test-validator-session.cpp deleted file mode 100644 index e986795a..00000000 --- a/test/test-validator-session.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. 
- - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "rldp/rldp.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "td/utils/overloaded.h" -#include "catchain/catchain.h" -#include "validator-session/validator-session.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include - -class TestNode : public td::actor::Actor { - private: - td::actor::ActorOwn keyring_; - td::actor::ActorOwn adnl_; - td::actor::ActorOwn rldp_; - std::vector> dht_nodes_; - td::actor::ActorOwn overlay_manager_; - std::vector> validator_sessions_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - std::unique_ptr make_vs_callback() { - class Callback : public ton::validatorsession::ValidatorSession::Callback { - public: - void on_candidate(td::uint32 round, ton::PublicKeyHash source, - ton::validatorsession::ValidatorSessionRootHash root_hash, td::BufferSlice data, - td::BufferSlice extra, - td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::on_candidate, round, source, root_hash, std::move(data), - std::move(extra), std::move(promise)); - } - void on_generate_slot(td::uint32 round, td::Promise promise) override { - td::actor::send_closure(id_, &TestNode::on_generate_slot, round, std::move(promise)); - } - void on_block_committed(td::uint32 round, ton::PublicKeyHash src, - ton::validatorsession::ValidatorSessionRootHash root_hash, - ton::validatorsession::ValidatorSessionFileHash file_hash, td::BufferSlice data, - std::vector> signatures, - ton::validatorsession::ValidatorSessionStats stats) override { - td::actor::send_closure(id_, &TestNode::on_block_committed, round, root_hash, std::move(data), - std::move(signatures)); - } - /*void on_missing_block_committed( - td::uint32 round, ton::validatorsession::ValidatorSessionRootHash root_hash, ton::validatorsession::ValidatorSessionFileHash file_hash, - td::BufferSlice data, std::vector> signatures) override { - td::actor::send_closure(id_, &TestNode::on_block_committed_abscent, round, root_hash, file_hash, - std::move(data), std::move(signatures)); - }*/ - void on_block_skipped(td::uint32 round) override { - td::actor::send_closure(id_, &TestNode::on_block_skipped, round); - } - void get_approved_candidate(ton::validatorsession::ValidatorSessionRootHash root_hash, - ton::validatorsession::ValidatorSessionFileHash file_hash, - ton::validatorsession::ValidatorSessionFileHash collated_data_file_hash, - td::Promise promise) override { - UNREACHABLE(); - } - - Callback(td::actor::ActorId id) : id_(std::move(id)) { - } - - private: - td::actor::ActorId id_; - }; - - return std::make_unique(actor_id(this)); - } - - td::uint64 height_ = 0; - - public: - void on_candidate(td::uint32 round, ton::PublicKeyHash source, - ton::validatorsession::ValidatorSessionRootHash root_hash, td::BufferSlice data, - td::BufferSlice collated, - td::Promise promise) { - auto sh = sha256_bits256(data.as_slice()); - auto B = ton::fetch_tl_object(std::move(data), true); - if (B.is_error()) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{B.move_as_error().to_string(), td::BufferSlice()}); - return; - } - if (collated.size() != 32) { - 
promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad collated data length", td::BufferSlice()}); - return; - } - td::Bits256 x; - x.as_slice().copy_from(collated.as_slice().truncate(32)); - if (x != sh) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad block hash", td::BufferSlice()}); - return; - } - auto block = B.move_as_ok(); - if (block->root_hash_ != root_hash) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad root hash", td::BufferSlice()}); - return; - } - if (block->root_hash_ != sha256_bits256(block->data_.as_slice())) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad root hash (2)", td::BufferSlice()}); - return; - } - if (block->height_ != static_cast(height_) + 1) { - promise.set_result( - ton::validatorsession::ValidatorSession::CandidateDecision{"bad root height", td::BufferSlice()}); - return; - } - promise.set_result(ton::validatorsession::ValidatorSession::CandidateDecision{0}); - } - void on_generate_slot(td::uint32 round, td::Promise promise) { - auto data = td::BufferSlice{10000}; - td::Random::secure_bytes(data.as_slice()); - auto root_hash = sha256_bits256(data.as_slice()); - auto block = - ton::create_tl_object(root_hash, height_ + 1, std::move(data)); - - auto B = ton::serialize_tl_object(block, true); - auto hash = sha256_bits256(B.as_slice()); - auto collated = td::BufferSlice{32}; - collated.as_slice().copy_from(as_slice(hash)); - - /*BlockId id; - BlockStatus status; - RootHash root_hash; - FileHash file_hash; - FileHash collated_file_hash; - td::BufferSlice data; - td::BufferSlice collated_data;*/ - auto collated_file_hash = td::sha256_bits256(collated.as_slice()); - ton::BlockCandidate candidate{ton::BlockIdExt{ton::BlockId{0, 0, 0}, root_hash, td::sha256_bits256(B.as_slice())}, - collated_file_hash, std::move(B), std::move(collated)}; - promise.set_result(std::move(candidate)); - } - void on_block_committed(td::uint32 round, ton::validatorsession::ValidatorSessionRootHash root_hash, - td::BufferSlice data, - std::vector> signatures) { - LOG(ERROR) << "COMITTED BLOCK: ROUND=" << round << " ROOT_HASH=" << root_hash - << " DATA_HASH=" << sha256_bits256(data.as_slice()) << " SIGNED BY " << signatures.size(); - } - void on_block_skipped(td::uint32 round) { - LOG(ERROR) << "SKIPPED ROUND=" << round; - } - - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void start_up() override { - } - void alarm() override { - } - TestNode() { - } - void run() { - keyring_ = ton::keyring::Keyring::create("/var/ton-work/db.keyring"); - adnl_ = ton::adnl::Adnl::create("/var/ton-work/db.adnl", keyring_.get()); - rldp_ = ton::rldp::Rldp::create(adnl_.get()); - - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - /*if (!lc.net_) { - LOG(FATAL) << "local config does not contain NET section"; - }*/ - - 
//td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - //td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - auto dhtR = ton::dht::Dht::create_global_config(std::move(gc.dht_)); - if (dhtR.is_error()) { - LOG(FATAL) << "bad dht config: " << dhtR.move_as_error(); - } - auto dht = dhtR.move_as_ok(); - - for (auto &it : lc.dht_) { - std::vector adnl_ids; - ton::ton_api::downcast_call( - *it.get(), td::overloaded( - [&](ton::ton_api::dht_config_local &obj) { - adnl_ids.push_back(ton::adnl::AdnlNodeIdShort{obj.id_->id_}); - }, - [&](ton::ton_api::dht_config_random_local &obj) { - auto addrR = ton::adnl::AdnlAddressList::create(std::move(obj.addr_list_)); - addrR.ensure(); - auto addr = addrR.move_as_ok(); - for (td::int32 i = 0; i < obj.cnt_; i++) { - auto pk = ton::PrivateKey{ton::privkeys::Ed25519::random()}; - auto pub = pk.compute_public_key(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{pub}, - addr); - auto adnl_id = ton::adnl::AdnlNodeIdShort{pub.compute_short_id()}; - adnl_ids.push_back(adnl_id); - } - })); - for (auto &id : adnl_ids) { - auto R = ton::dht::Dht::create(id, "/var/ton-work/db/", dht, keyring_.get(), adnl_.get()); - R.ensure(); - dht_nodes_.push_back(R.move_as_ok()); - } - } - - CHECK(dht_nodes_.size() > 0); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_dht_node, dht_nodes_[0].get()); - //td::actor::send_closure(overlay_manager_, &ton::overlay::Overlays::register_dht_node, dht_nodes_[0].get()); - - overlay_manager_ = - ton::overlay::Overlays::create("/var/ton-work/db.overlays", keyring_.get(), adnl_.get(), dht_nodes_[0].get()); - - //auto C = ton::CatChainActor::create(nullptr, adnl_.get(), overlay_manager_.get(), - // std::vector>()); - - for (auto &it : lc.catchains_) { - auto tag = it->tag_; - for (auto &V : gc.catchains_) { - if (V->tag_ == tag) { - auto v = std::move(clone_tl_object(V)->nodes_); - - std::vector w; - w.resize(v.size()); - for (size_t i = 0; i < w.size(); i++) { - w[i].pub_key = ton::PublicKey{v[i]}; - w[i].adnl_id = ton::adnl::AdnlNodeIdShort{w[i].pub_key.compute_short_id()}; - w[i].weight = 1; - } - - auto C = ton::validatorsession::ValidatorSession::create( - tag, ton::PublicKeyHash{it->id_->id_}, std::move(w), make_vs_callback(), keyring_.get(), adnl_.get(), - rldp_.get(), overlay_manager_.get(), "/var/ton-work/db/"); - td::actor::send_closure(C, &ton::validatorsession::ValidatorSession::start); - validator_sessions_.emplace_back(std::move(C)); - } - } - } - } -}; - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_INFO); - td::set_default_failure_signal_handler().ensure(); - - td::actor::ActorOwn x; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - 
return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - auto FileLog = td::FileFd::open(td::CSlice(fname.str().c_str()), - td::FileFd::Flags::Create | td::FileFd::Flags::Append | td::FileFd::Flags::Write) - .move_as_ok(); - - dup2(FileLog.get_native_fd().fd(), 1); - dup2(FileLog.get_native_fd().fd(), 2); - return td::Status::OK(); - }); -#endif - - td::actor::Scheduler scheduler({7}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - scheduler.run(); - - return 0; -} diff --git a/test/test-validator.cpp b/test/test-validator.cpp deleted file mode 100644 index 7bc018f7..00000000 --- a/test/test-validator.cpp +++ /dev/null @@ -1,356 +0,0 @@ -/* - This file is part of TON Blockchain source code. - - TON Blockchain is free software; you can redistribute it and/or - modify it under the terms of the GNU General Public License - as published by the Free Software Foundation; either version 2 - of the License, or (at your option) any later version. - - TON Blockchain is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with TON Blockchain. If not, see . - - In addition, as a special exception, the copyright holders give permission - to link the code of portions of this program with the OpenSSL library. - You must obey the GNU General Public License in all respects for all - of the code used other than OpenSSL. If you modify file(s) with this - exception, you may extend this exception to your version of the file(s), - but you are not obligated to do so. If you do not wish to do so, delete this - exception statement from your version. If you delete this exception statement - from all source files in the program, then also delete it here. 
- - Copyright 2017-2020 Telegram Systems LLP -*/ -#include "adnl/adnl.h" -#include "rldp/rldp.h" -#include "adnl/utils.hpp" -#include "auto/tl/ton_api_json.h" -#include "auto/tl/ton_api.hpp" -#include "dht/dht.h" -#include "overlay/overlays.h" -#include "td/utils/OptionParser.h" -#include "td/utils/Time.h" -#include "td/utils/TsFileLog.h" -#include "td/utils/filesystem.h" -#include "td/utils/format.h" -#include "td/utils/Random.h" -#include "td/utils/port/signals.h" -#include "td/utils/port/FileFd.h" -#include "catchain/catchain.h" -#include "validator-session/validator-session.h" -#include "ton-node/ton-node.h" -#include "validator/manager.h" -#include "td/utils/filesystem.h" -#include "td/utils/ThreadSafeCounter.h" -#include "td/utils/port/path.h" -#include "crypto/vm/cp0.h" -#include "td/utils/overloaded.h" - -#include "memprof/memprof.h" - -#if TD_DARWIN || TD_LINUX -#include -#endif -#include -#include -#include - -class TestNode : public td::actor::Actor { - private: - td::actor::ActorOwn keyring_; - td::actor::ActorOwn adnl_; - td::actor::ActorOwn rldp_; - std::vector> dht_nodes_; - td::actor::ActorOwn overlay_manager_; - td::actor::ActorOwn validator_manager_; - td::actor::ActorOwn ton_node_; - - std::string local_config_ = "ton-local.config"; - std::string global_config_ = "ton-global.config"; - - std::string db_root_ = "/var/ton-work/db/"; - std::string zero_state_ = ""; - - public: - void set_local_config(std::string str) { - local_config_ = str; - } - void set_global_config(std::string str) { - global_config_ = str; - } - void set_db_root(std::string db_root) { - db_root_ = db_root; - } - void set_zero_state(std::string zero_state) { - zero_state_ = zero_state; - } - void start_up() override { - } - void alarm() override { - } - TestNode() { - } - void run() { - td::mkdir(db_root_).ensure(); - - keyring_ = ton::keyring::Keyring::create(db_root_ + "/keyring"); - adnl_ = ton::adnl::Adnl::create(db_root_, keyring_.get()); - rldp_ = ton::rldp::Rldp::create(adnl_.get()); - - auto L = td::read_file(local_config_).move_as_ok(); - auto lc_j = td::json_decode(L.as_slice()).move_as_ok(); - ton::ton_api::config_local lc; - ton::ton_api::from_json(lc, lc_j.get_object()).ensure(); - - auto G = td::read_file(global_config_).move_as_ok(); - auto gc_j = td::json_decode(G.as_slice()).move_as_ok(); - ton::ton_api::config_global gc; - ton::ton_api::from_json(gc, gc_j.get_object()).ensure(); - - for (auto &port : lc.udp_ports_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_listening_udp_port, "0.0.0.0", - static_cast(port)); - } - /*if (!lc.net_) { - LOG(FATAL) << "local config does not contain NET section"; - }*/ - - //td::actor::send_closure(network_manager_, &ton::adnl::AdnlNetworkManager::load_local_config, std::move(lc.net_)); - //td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_ids_from_config, std::move(lc.local_ids_)); - for (auto &local_id : lc.local_ids_) { - auto pk = ton::PrivateKey{local_id->id_}; - auto pub = pk.compute_public_key(); - auto addr_list = ton::adnl::AdnlAddressList::create(local_id->addr_list_); - addr_list.ensure(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{pub}, addr_list.move_as_ok()); - } - if (gc.adnl_) { - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_static_nodes_from_config, - std::move(gc.adnl_->static_nodes_)); - } - if (!gc.dht_) { - LOG(FATAL) << "global config does not contain dht section"; - } - - auto 
dhtR = ton::dht::Dht::create_global_config(std::move(gc.dht_)); - if (dhtR.is_error()) { - LOG(FATAL) << "bad dht config: " << dhtR.move_as_error(); - } - auto dht = dhtR.move_as_ok(); - - for (auto &it : lc.dht_) { - std::vector adnl_ids; - ton::ton_api::downcast_call( - *it.get(), td::overloaded( - [&](ton::ton_api::dht_config_local &obj) { - adnl_ids.push_back(ton::adnl::AdnlNodeIdShort{obj.id_->id_}); - }, - [&](ton::ton_api::dht_config_random_local &obj) { - auto addrR = ton::adnl::AdnlAddressList::create(std::move(obj.addr_list_)); - addrR.ensure(); - auto addr = addrR.move_as_ok(); - for (td::int32 i = 0; i < obj.cnt_; i++) { - auto pk = ton::PrivateKey{ton::privkeys::Ed25519::random()}; - auto pub = pk.compute_public_key(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, ton::adnl::AdnlNodeIdFull{pub}, - addr); - auto adnl_id = ton::adnl::AdnlNodeIdShort{pub.compute_short_id()}; - adnl_ids.push_back(adnl_id); - } - })); - for (auto &id : adnl_ids) { - auto R = ton::dht::Dht::create(id, db_root_, dht, keyring_.get(), adnl_.get()); - R.ensure(); - dht_nodes_.push_back(R.move_as_ok()); - } - } - - CHECK(dht_nodes_.size() > 0); - - td::actor::send_closure(adnl_, &ton::adnl::Adnl::register_dht_node, dht_nodes_[0].get()); - overlay_manager_ = ton::overlay::Overlays::create(db_root_, keyring_.get(), adnl_.get(), dht_nodes_[0].get()); - - CHECK(lc.validators_.size() <= 1); - CHECK(gc.validators_.size() <= 1); - - bool is_validator = false; - if (lc.validators_.size() == 1) { - CHECK(gc.validators_.size() == 1); - auto zero_state_id = - ton::BlockIdExt{ton::masterchainId, ton::shardIdAll, 0, gc.validators_[0]->zero_state_root_hash_, - gc.validators_[0]->zero_state_file_hash_}; - ton::PublicKeyHash id; - ton::adnl::AdnlNodeIdShort adnl_id; - ton::ton_api::downcast_call(*lc.validators_[0].get(), - td::overloaded( - [&](ton::ton_api::validator_config_local &cfg) { - id = ton::PublicKeyHash{cfg.id_->id_}; - adnl_id = ton::adnl::AdnlNodeIdShort{id}; - is_validator = true; - }, - [&](ton::ton_api::validator_config_random_local &cfg) { - auto privkey = ton::PrivateKey{ton::privkeys::Ed25519::random()}; - auto pubkey = ton::adnl::AdnlNodeIdFull{privkey.compute_public_key()}; - auto addrR = ton::adnl::AdnlAddressList::create(std::move(cfg.addr_list_)); - addrR.ensure(); - auto addr = addrR.move_as_ok(); - id = privkey.compute_short_id(); - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, - std::move(privkey), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, pubkey, addr); - adnl_id = ton::adnl::AdnlNodeIdShort{id}; - })); - - auto opts = ton::ValidatorManagerOptions::create( - zero_state_id, std::vector{ton::ShardIdFull{ton::basechainId, ton::shardIdAll}}); - CHECK(!opts.is_null()); - opts.write().set_allow_blockchain_init(is_validator); - validator_manager_ = - ton::ValidatorManagerFactory::create(is_validator ? 
id : ton::PublicKeyHash::zero(), opts, db_root_, - keyring_.get(), adnl_.get(), rldp_.get(), overlay_manager_.get()); - ton_node_ = - ton::TonNodeManager::create(adnl_id, gc.validators_[0]->zero_state_file_hash_, adnl_.get(), rldp_.get(), - dht_nodes_[0].get(), overlay_manager_.get(), validator_manager_.get(), db_root_); - - for (auto &x : lc.liteservers_) { - auto pk = ton::PrivateKey{x->id_}; - auto pub_k = ton::adnl::AdnlNodeIdFull{pk.compute_public_key()}; - auto id = pub_k.compute_short_id(); - - td::actor::send_closure(keyring_, &ton::keyring::Keyring::add_key, std::move(pk), false); - td::actor::send_closure(adnl_, &ton::adnl::Adnl::add_id, pub_k, ton::adnl::AdnlAddressList{}); - td::actor::send_closure(validator_manager_, &ton::ValidatorManager::add_ext_server_id, id); - td::actor::send_closure(validator_manager_, &ton::ValidatorManager::add_ext_server_port, - static_cast(x->port_)); - } - } - } -}; - -td::Result get_uint256(std::string str) { - if (str.size() != 64) { - return td::Status::Error("uint256 must have 64 bytes"); - } - td::UInt256 res; - for (size_t i = 0; i < 32; i++) { - res.raw[i] = static_cast(td::hex_to_int(str[2 * i]) * 16 + td::hex_to_int(str[2 * i + 1])); - } - return res; -} - -std::atomic need_stats_flag{false}; -void need_stats(int sig) { - need_stats_flag.store(true); -} -void dump_memory_stats() { - if (!is_memprof_on()) { - return; - } - LOG(WARNING) << "memory_dump"; - std::vector v; - dump_alloc([&](const AllocInfo &info) { v.push_back(info); }); - std::sort(v.begin(), v.end(), [](const AllocInfo &a, const AllocInfo &b) { return a.size > b.size; }); - size_t total_size = 0; - size_t other_size = 0; - int cnt = 0; - for (auto &info : v) { - if (cnt++ < 50) { - LOG(WARNING) << td::format::as_size(info.size) << td::format::as_array(info.backtrace); - } else { - other_size += info.size; - } - total_size += info.size; - } - LOG(WARNING) << td::tag("other", td::format::as_size(other_size)); - LOG(WARNING) << td::tag("total", td::format::as_size(total_size)); - LOG(WARNING) << td::tag("total traces", get_ht_size()); - LOG(WARNING) << td::tag("fast_backtrace_success_rate", get_fast_backtrace_success_rate()); -} -void dump_stats() { - dump_memory_stats(); - LOG(WARNING) << td::NamedThreadSafeCounter::get_default(); -} - -int main(int argc, char *argv[]) { - SET_VERBOSITY_LEVEL(verbosity_INFO); - - td::set_default_failure_signal_handler().ensure(); - - CHECK(vm::init_op_cp0()); - - td::actor::ActorOwn x; - td::unique_ptr logger_; - SCOPE_EXIT { - td::log_interface = td::default_log_interface; - }; - - td::OptionParser p; - p.set_description("test basic adnl functionality"); - p.add_option('v', "verbosity", "set verbosity level", [&](td::Slice arg) { - int v = VERBOSITY_NAME(FATAL) + (td::to_integer(arg)); - SET_VERBOSITY_LEVEL(v); - return td::Status::OK(); - }); - p.add_option('h', "help", "prints_help", [&]() { - char b[10240]; - td::StringBuilder sb(td::MutableSlice{b, 10000}); - sb << p; - std::cout << sb.as_cslice().c_str(); - std::exit(2); - return td::Status::OK(); - }); - p.add_option('C', "global-config", "file to read global config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_global_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('c', "local-config", "file to read local config", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_local_config, fname.str()); - return td::Status::OK(); - }); - p.add_option('i', "id", "id of instance", [&](td::Slice fname) { return td::Status::OK(); }); - 
p.add_option('D', "db", "root for dbs", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_db_root, fname.str()); - return td::Status::OK(); - }); - p.add_option('z', "zero-state", "file with serialized zero state", [&](td::Slice fname) { - td::actor::send_closure(x, &TestNode::set_zero_state, fname.str()); - return td::Status::OK(); - }); - p.add_option('d', "daemonize", "set SIGHUP", [&]() { - td::set_signal_handler(td::SignalType::HangUp, [](int sig) { -#if TD_DARWIN || TD_LINUX - close(0); - setsid(); -#endif - }).ensure(); - return td::Status::OK(); - }); -#if TD_DARWIN || TD_LINUX - p.add_option('l', "logname", "log to file", [&](td::Slice fname) { - logger_ = td::TsFileLog::create(fname.str()).move_as_ok(); - td::log_interface = logger_.get(); - return td::Status::OK(); - }); -#endif - td::set_runtime_signal_handler(1, need_stats).ensure(); - - td::actor::Scheduler scheduler({7}); - - scheduler.run_in_context([&] { x = td::actor::create_actor("testnode"); }); - - scheduler.run_in_context([&] { p.run(argc, argv).ensure(); }); - scheduler.run_in_context([&] { td::actor::send_closure(x, &TestNode::run); }); - while (scheduler.run(1)) { - if (need_stats_flag.exchange(false)) { - dump_stats(); - } - } - - return 0; -} diff --git a/third-party/blst b/third-party/blst index e9dfc5ee..3dd0f804 160000 --- a/third-party/blst +++ b/third-party/blst @@ -1 +1 @@ -Subproject commit e9dfc5ee724b5b25d50a3b6226bee8c2c9d5e65d +Subproject commit 3dd0f804b1819e5d03fb22ca2e6fac105932043a diff --git a/third-party/rocksdb b/third-party/rocksdb index fcf3d75f..cb7a5e02 160000 --- a/third-party/rocksdb +++ b/third-party/rocksdb @@ -1 +1 @@ -Subproject commit fcf3d75f3f022a6a55ff1222d6b06f8518d38c7c +Subproject commit cb7a5e02edeb883193eb5b4901d5943f58e9add9 diff --git a/tl-utils/CMakeLists.txt b/tl-utils/CMakeLists.txt index b17b7dc9..d5c52d48 100644 --- a/tl-utils/CMakeLists.txt +++ b/tl-utils/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) set(TL_UTILS_SOURCE common-utils.hpp diff --git a/tl/CMakeLists.txt b/tl/CMakeLists.txt index 8adabeda..d0760a34 100644 --- a/tl/CMakeLists.txt +++ b/tl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_subdirectory(generate) set_source_files_properties(${TL_TON_API} PROPERTIES GENERATED TRUE) diff --git a/tl/generate/CMakeLists.txt b/tl/generate/CMakeLists.txt index 61d66c93..083d3973 100644 --- a/tl/generate/CMakeLists.txt +++ b/tl/generate/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) file(MAKE_DIRECTORY auto/tl) diff --git a/tl/generate/scheme/ton_api.tl b/tl/generate/scheme/ton_api.tl index 50e0aabb..43e22e41 100644 --- a/tl/generate/scheme/ton_api.tl +++ b/tl/generate/scheme/ton_api.tl @@ -781,11 +781,12 @@ http.server.config dhs:(vector http.server.dnsEntry) local_hosts:(vector http.se ---types--- -validatorSession.statsProducer id:int256 block_status:int block_timestamp:long = validatorSession.StatsProducer; +validatorSession.statsProducer id:int256 candidate_id:int256 block_status:int block_timestamp:long comment:string = validatorSession.StatsProducer; validatorSession.statsRound timestamp:long producers:(vector validatorSession.statsProducer) = validatorSession.StatsRound; -validatorSession.stats id:tonNode.blockId timestamp:long self:int256 creator:int256 total_validators:int total_weight:long 
+validatorSession.stats success:Bool id:tonNode.blockIdExt timestamp:long self:int256 session_id:int256 cc_seqno:int + creator:int256 total_validators:int total_weight:long signatures:int signatures_weight:long approve_signatures:int approve_signatures_weight:long first_round:int rounds:(vector validatorSession.statsRound) = validatorSession.Stats; diff --git a/tl/generate/scheme/ton_api.tlo b/tl/generate/scheme/ton_api.tlo index 4ff97e05..724f104e 100644 Binary files a/tl/generate/scheme/ton_api.tlo and b/tl/generate/scheme/ton_api.tlo differ diff --git a/tl/generate/scheme/tonlib_api.tl b/tl/generate/scheme/tonlib_api.tl index d5f1daac..95c9e74d 100644 --- a/tl/generate/scheme/tonlib_api.tl +++ b/tl/generate/scheme/tonlib_api.tl @@ -61,6 +61,7 @@ pchan.config alice_public_key:string alice_address:accountAddress bob_public_key raw.initialAccountState code:bytes data:bytes = InitialAccountState; wallet.v3.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; +wallet.v4.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; wallet.highload.v1.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; wallet.highload.v2.initialAccountState public_key:string wallet_id:int64 = InitialAccountState; @@ -73,6 +74,7 @@ pchan.initialAccountState config:pchan.config = InitialAccountState; raw.accountState code:bytes data:bytes frozen_hash:bytes = AccountState; wallet.v3.accountState wallet_id:int64 seqno:int32 = AccountState; +wallet.v4.accountState wallet_id:int64 seqno:int32 = AccountState; wallet.highload.v1.accountState wallet_id:int64 seqno:int32 = AccountState; wallet.highload.v2.accountState wallet_id:int64 = AccountState; dns.accountState wallet_id:int64 = AccountState; diff --git a/tl/generate/scheme/tonlib_api.tlo b/tl/generate/scheme/tonlib_api.tlo index 79d56a33..d2da9621 100644 Binary files a/tl/generate/scheme/tonlib_api.tlo and b/tl/generate/scheme/tonlib_api.tlo differ diff --git a/tl/tl/tl_json.h b/tl/tl/tl_json.h index 489bd6e8..8eee3aad 100644 --- a/tl/tl/tl_json.h +++ b/tl/tl/tl_json.h @@ -108,12 +108,13 @@ inline Status from_json(std::int32_t &to, JsonValue from) { inline Status from_json(bool &to, JsonValue from) { if (from.type() != JsonValue::Type::Boolean) { int32 x; + auto type = from.type(); auto status = from_json(x, std::move(from)); if (status.is_ok()) { to = x != 0; return Status::OK(); } - return Status::Error(PSLICE() << "Expected bool, got " << from.type()); + return Status::Error(PSLICE() << "Expected bool, got " << type); } to = from.get_boolean(); return Status::OK(); diff --git a/tonlib/CMakeLists.txt b/tonlib/CMakeLists.txt index 8ee8fcc6..9ce5bded 100644 --- a/tonlib/CMakeLists.txt +++ b/tonlib/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) option(BUILD_SHARED_LIBS "Use \"OFF\" for a static build." 
ON) diff --git a/tonlib/test/offline.cpp b/tonlib/test/offline.cpp index a1e5a0f6..7b4342dd 100644 --- a/tonlib/test/offline.cpp +++ b/tonlib/test/offline.cpp @@ -333,8 +333,8 @@ TEST(Tonlib, ConfigParseBug) { unsigned char buff[128]; int bits = (int)td::bitstring::parse_bitstring_hex_literal(buff, sizeof(buff), literal.begin(), literal.end()); CHECK(bits >= 0); - auto slice = vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize(); - block::Config::do_get_gas_limits_prices(std::move(slice), 21).ensure(); + auto cell = vm::CellBuilder().store_bits(td::ConstBitPtr{buff}, bits).finalize(); + block::Config::do_get_gas_limits_prices(vm::load_cell_slice(cell), 21).ensure(); } TEST(Tonlib, EncryptionApi) { @@ -467,15 +467,20 @@ TEST(Tonlib, KeysApi) { make_object(make_object(key->public_key_, key->secret_.copy()))) .move_as_ok(); - auto err1 = sync_send(client, make_object( - new_local_password.copy(), td::SecureString("wrong password"), - make_object(copy_word_list()))) - .move_as_error(); + auto err1 = sync_send( + client, make_object(new_local_password.copy(), td::SecureString("wrong password"), + make_object(copy_word_list()))); + if (err1.is_ok()) { + if (err1.ok()->public_key_ != key->public_key_) { + err1 = td::Status::Error("imported key successfully, but the public key is different"); + } + } + err1.ensure_error(); auto err2 = sync_send(client, make_object(new_local_password.copy(), td::SecureString(), - make_object(copy_word_list()))) - .move_as_error(); - LOG(INFO) << err1 << " | " << err2; + make_object(copy_word_list()))); + err2.ensure_error(); + LOG(INFO) << err1.move_as_error() << " | " << err2.move_as_error(); auto imported_key = sync_send(client, make_object(new_local_password.copy(), mnemonic_password.copy(), make_object(copy_word_list()))) @@ -609,7 +614,30 @@ TEST(Tonlib, ConfigCache) { "seqno": 0, "root_hash": "gj+B8wb/AmlPk1z1AhVI484rhrUpgSr2oSFIh56VoSg=", "file_hash": "Z+IKwYS54DmmJmesw/nAD5DzWadnOCMzee+kdgSYDOg=" - } + }, + "hardforks": [ + { + "file_hash": "jF3RTD+OyOoP+OI9oIjdV6M8EaOh9E+8+c3m5JkPYdg=", + "seqno": 5141579, + "root_hash": "6JSqIYIkW7y8IorxfbQBoXiuY3kXjcoYgQOxTJpjXXA=", + "workchain": -1, + "shard": -9223372036854775808 + }, + { + "file_hash": "WrNoMrn5UIVPDV/ug/VPjYatvde8TPvz5v1VYHCLPh8=", + "seqno": 5172980, + "root_hash": "054VCNNtUEwYGoRe1zjH+9b1q21/MeM+3fOo76Vcjes=", + "workchain": -1, + "shard": -9223372036854775808 + }, + { + "file_hash": "xRaxgUwgTXYFb16YnR+Q+VVsczLl6jmYwvzhQ/ncrh4=", + "seqno": 5176527, + "root_hash": "SoPLqMe9Dz26YJPOGDOHApTSe5i0kXFtRmRh/zPMGuI=", + "workchain": -1, + "shard": -9223372036854775808 + } + ] } })abc"; auto custom = R"abc({ diff --git a/tonlib/tonlib/TonlibClient.cpp b/tonlib/tonlib/TonlibClient.cpp index 45802836..7bdac059 100644 --- a/tonlib/tonlib/TonlibClient.cpp +++ b/tonlib/tonlib/TonlibClient.cpp @@ -30,6 +30,7 @@ #include "smc-envelope/GenericAccount.h" #include "smc-envelope/ManualDns.h" #include "smc-envelope/WalletV3.h" +#include "smc-envelope/WalletV4.h" #include "smc-envelope/HighloadWallet.h" #include "smc-envelope/HighloadWalletV2.h" #include "smc-envelope/PaymentChannel.h" @@ -227,6 +228,14 @@ td::Result to_init_data(const tonlib_api::wallet_v3_ini return std::move(init_data); } +td::Result to_init_data(const tonlib_api::wallet_v4_initialAccountState& wallet_state) { + TRY_RESULT(key_bytes, get_public_key(wallet_state.public_key_)); + ton::WalletV4::InitData init_data; + init_data.public_key = td::SecureString(key_bytes.key); + init_data.wallet_id = static_cast(wallet_state.wallet_id_); 
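The tl_json.h hunk above fixes a use-after-move: the old error path read from.type() after from had already been moved into from_json(x, std::move(from)), so the fix captures the type into a local first and reports the saved copy. A minimal standalone sketch of the same capture-before-move pattern (toy names, not TON code):

#include <iostream>
#include <string>
#include <utility>

// Consumes the string; after the call the caller's object is moved-from.
static std::size_t consume(std::string &&s) {
  std::string sink = std::move(s);
  return sink.size();
}

int main() {
  std::string value = "some json payload";
  // Capture anything still needed for diagnostics *before* handing the object off.
  const std::string description = "payload of " + std::to_string(value.size()) + " chars";
  std::size_t n = consume(std::move(value));
  // `description` is safe to use here; calling value.size() now would read a moved-from object.
  std::cout << "consumed " << n << " chars (" << description << ")\n";
  return 0;
}

The same rule applies to any argument that is both moved into a call and mentioned in a later error message.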
+ return std::move(init_data); +} + td::Result to_init_data(const tonlib_api::rwallet_initialAccountState& rwallet_state) { TRY_RESULT(init_key_bytes, get_public_key(rwallet_state.init_public_key_)); TRY_RESULT(key_bytes, get_public_key(rwallet_state.public_key_)); @@ -318,6 +327,16 @@ class AccountState { return tonlib_api::make_object(static_cast(wallet_id), static_cast(seqno)); } + td::Result> to_wallet_v4_accountState() const { + if (wallet_type_ != WalletV4) { + return TonlibError::AccountTypeUnexpected("WalletV4"); + } + auto wallet = ton::WalletV4(get_smc_state()); + TRY_RESULT(seqno, wallet.get_seqno()); + TRY_RESULT(wallet_id, wallet.get_wallet_id()); + return tonlib_api::make_object(static_cast(wallet_id), + static_cast(seqno)); + } td::Result> to_wallet_highload_v1_accountState() const { if (wallet_type_ != HighloadWalletV1) { @@ -419,6 +438,8 @@ class AccountState { return f(to_dns_accountState()); case PaymentChannel: return f(to_payment_channel_accountState()); + case WalletV4: + return f(to_wallet_v4_accountState()); } UNREACHABLE(); } @@ -457,7 +478,8 @@ class AccountState { HighloadWalletV2, ManualDns, PaymentChannel, - RestrictedWallet + RestrictedWallet, + WalletV4 }; WalletType get_wallet_type() const { return wallet_type_; @@ -476,6 +498,7 @@ class AccountState { case AccountState::HighloadWalletV1: case AccountState::HighloadWalletV2: case AccountState::RestrictedWallet: + case AccountState::WalletV4: return true; } UNREACHABLE(); @@ -496,6 +519,8 @@ class AccountState { return td::make_unique(get_smc_state()); case AccountState::RestrictedWallet: return td::make_unique(get_smc_state()); + case AccountState::WalletV4: + return td::make_unique(get_smc_state()); } UNREACHABLE(); return {}; @@ -553,6 +578,23 @@ class AccountState { break; } }, + [&](tonlib_api::wallet_v4_initialAccountState& v4wallet) { + for (auto revision : ton::SmartContractCode::get_revisions(ton::SmartContractCode::WalletV4)) { + auto init_data = to_init_data(v4wallet); + if (init_data.is_error()) { + continue; + } + auto wallet = ton::WalletV4::create(init_data.move_as_ok(), revision); + if (!(wallet->get_address(ton::masterchainId) == address_ || + wallet->get_address(ton::basechainId) == address_)) { + continue; + } + wallet_type_ = WalletType::WalletV4; + wallet_revision_ = revision; + set_new_state(wallet->get_state()); + break; + } + }, [&](tonlib_api::rwallet_initialAccountState& rwallet) { for (auto revision : ton::SmartContractCode::get_revisions(ton::SmartContractCode::RestrictedWallet)) { auto r_init_data = to_init_data(rwallet); @@ -596,7 +638,7 @@ class AccountState { return wallet_type_; } auto wallet_id = static_cast(address_.workchain + wallet_id_); - ton::WalletV3::InitData init_data{key.as_octet_string(), wallet_id}; + ton::WalletInterface::DefaultInitData init_data{key.as_octet_string(), wallet_id}; auto o_revision = ton::WalletV3::guess_revision(address_, init_data); if (o_revision) { wallet_type_ = WalletType::WalletV3; @@ -604,6 +646,13 @@ class AccountState { set_new_state(ton::WalletV3::get_init_state(wallet_revision_, init_data)); return wallet_type_; } + o_revision = ton::WalletV4::guess_revision(address_, init_data); + if (o_revision) { + wallet_type_ = WalletType::WalletV4; + wallet_revision_ = o_revision.value(); + set_new_state(ton::WalletV4::get_init_state(wallet_revision_, init_data)); + return wallet_type_; + } o_revision = ton::HighloadWalletV2::guess_revision(address_, init_data); if (o_revision) { wallet_type_ = WalletType::HighloadWalletV2; @@ -681,6 +730,12 @@ 
class AccountState { wallet_revision_ = o_revision.value(); return wallet_type_; } + o_revision = ton::WalletV4::guess_revision(code_hash); + if (o_revision) { + wallet_type_ = WalletType::WalletV4; + wallet_revision_ = o_revision.value(); + return wallet_type_; + } o_revision = ton::HighloadWalletV2::guess_revision(code_hash); if (o_revision) { wallet_type_ = WalletType::HighloadWalletV2; @@ -1916,7 +1971,7 @@ class RunEmulator : public TonlibQueryActor { ton::UnixTime now = account_state_->get_sync_time(); bool is_special = address.workchain == ton::masterchainId && config->is_special_smartcontract(address.addr); block::Account account(address.workchain, address.addr.bits()); - if (!account.unpack(std::move(shard_account), td::Ref(), now, is_special)) { + if (!account.unpack(std::move(shard_account), now, is_special)) { check(td::Status::Error("Can't unpack shard account")); return; } @@ -2253,6 +2308,13 @@ td::Result get_account_address(const tonlib_api::wallet_v3_in ->get_address(workchain_id); } +td::Result get_account_address(const tonlib_api::wallet_v4_initialAccountState& test_wallet_state, + td::int32 revision, ton::WorkchainId workchain_id) { + TRY_RESULT(key_bytes, get_public_key(test_wallet_state.public_key_)); + return ton::WalletV4::create({key_bytes.key, static_cast(test_wallet_state.wallet_id_)}, revision) + ->get_address(workchain_id); +} + td::Result get_account_address( const tonlib_api::wallet_highload_v1_initialAccountState& test_wallet_state, td::int32 revision, ton::WorkchainId workchain_id) { @@ -2300,6 +2362,7 @@ static td::optional get_wallet_type(tonlib_api::In td::overloaded( [](const tonlib_api::raw_initialAccountState&) { return td::optional(); }, [](const tonlib_api::wallet_v3_initialAccountState&) { return ton::SmartContractCode::WalletV3; }, + [](const tonlib_api::wallet_v4_initialAccountState&) { return ton::SmartContractCode::WalletV4; }, [](const tonlib_api::wallet_highload_v1_initialAccountState&) { return ton::SmartContractCode::HighloadWalletV1; }, @@ -2389,6 +2452,12 @@ td::Status TonlibClient::do_request(tonlib_api::guessAccount& request, sources.push_back(Source{tonlib_api::make_object( request.public_key_, wallet_id_ + ton::basechainId), ton::basechainId}); + sources.push_back(Source{tonlib_api::make_object( + request.public_key_, wallet_id_ + ton::masterchainId), + ton::masterchainId}); + sources.push_back(Source{tonlib_api::make_object( + request.public_key_, wallet_id_ + ton::basechainId), + ton::basechainId}); for (Source& source : sources) { auto o_type = get_wallet_type(*source.init_state); if (!o_type) { @@ -2900,7 +2969,7 @@ struct ToRawTransactions { if (type == 0 || type == 0x2167da4b) { td::Status status; - auto r_body_message = vm::CellString::load(body.write()); + auto r_body_message = TRY_VM(vm::CellString::load(body.write())); LOG_IF(WARNING, r_body_message.is_error()) << "Failed to parse a message: " << r_body_message.error(); if (r_body_message.is_ok()) { @@ -4921,6 +4990,8 @@ td::Status TonlibClient::do_request(const tonlib_api::importKey& request, if (!request.exported_key_) { return TonlibError::EmptyField("exported_key"); } + // Note: the mnemonic is considered valid if a certain hash starts with zero byte (see Mnemonic::is_basic_seed()) + // Therefore, importKey with invalid password has 1/256 chance to return OK TRY_RESULT(key, key_storage_.import_key(std::move(request.local_password_), std::move(request.mnemonic_password_), KeyStorage::ExportedKey{std::move(request.exported_key_->word_list_)})); TRY_RESULT(key_bytes, 
public_key_from_bytes(key.public_key.as_slice())); diff --git a/tonlib/tonlib/tonlib-cli.cpp b/tonlib/tonlib/tonlib-cli.cpp index 5582377d..109d4a26 100644 --- a/tonlib/tonlib/tonlib-cli.cpp +++ b/tonlib/tonlib/tonlib-cli.cpp @@ -418,6 +418,8 @@ class TonlibCli : public td::actor::Actor { td::TerminalIO::out() << "exportkeypem [] - export key\n"; td::TerminalIO::out() << "gethistory - get history fo simple wallet with requested key (last 10 transactions)\n"; + td::TerminalIO::out() << "showtransactions [] - show transaction on account " + "with given and (in base64) and previous transactions (up to ).\n"; td::TerminalIO::out() << "init - init simple wallet with requested key\n"; td::TerminalIO::out() << "transfer[f][F][e][k][c] ( |) - " "make transfer from \n" @@ -515,6 +517,8 @@ class TonlibCli : public td::actor::Actor { } else if (cmd == "getmasterchainsignatures") { auto seqno = parser.read_word(); run_get_masterchain_block_signatures(seqno, std::move(cmd_promise)); + } else if (cmd == "showtransactions") { + run_show_transactions(parser, std::move(cmd_promise)); } else { cmd_promise.set_error(td::Status::Error(PSLICE() << "Unkwnown query `" << cmd << "`")); } @@ -2157,6 +2161,45 @@ class TonlibCli : public td::actor::Actor { })); } + void run_show_transactions(td::ConstParser& parser, td::Promise promise) { + TRY_RESULT_PROMISE(promise, address, to_account_address(parser.read_word(), false)); + TRY_RESULT_PROMISE(promise, lt, td::to_integer_safe(parser.read_word())); + TRY_RESULT_PROMISE(promise, hash, td::base64_decode(parser.read_word())); + int count = 1; + if (!parser.empty()) { + TRY_RESULT_PROMISE_ASSIGN(promise, count, td::to_integer_safe(parser.read_word())); + } + auto id = make_object(lt, hash); + send_query(make_object( + nullptr, ton::move_tl_object_as(std::move(address.address)), + std::move(id), count, false), + promise.wrap([](ton::tl_object_ptr&& result) -> td::Result { + td::TerminalIO::out() << "Found " << result->transactions_.size() << " transactions\n"; + for (size_t i = 0; i < result->transactions_.size(); ++i) { + td::TerminalIO::out() << "Transaction #" << i << "\n"; + auto& tr = result->transactions_[i]; + TRY_RESULT(root, vm::std_boc_deserialize(tr->data_)); + block::gen::Transaction::Record trans; + if (!tlb::unpack_cell(root, trans)) { + return td::Status::Error("cannot unpack transaction"); + } + td::TerminalIO::out() << "Transaction Account: " << tr->address_->account_address_ << "\n"; + td::TerminalIO::out() << "Transaction LT: " << tr->transaction_id_->lt_ << "\n"; + td::TerminalIO::out() << "Transaction Hash: " << td::base64_encode(tr->transaction_id_->hash_) + << "\n"; + td::TerminalIO::out() << "Transaction Timestamp: " << tr->utime_ << "\n"; + td::TerminalIO::out() << "Transaction Out messages: " << tr->out_msgs_.size() << "\n"; + td::TerminalIO::out() << "Previous transaction LT: " << trans.prev_trans_lt << "\n"; + td::TerminalIO::out() << "Previous transaction Hash: " + << td::base64_encode(trans.prev_trans_hash.as_slice()) << "\n"; + std::ostringstream ss; + block::gen::t_Transaction.print_ref(2048, ss, root); + td::TerminalIO::out() << "Transaction dump: " << ss.str() << "\n"; + } + return td::Unit(); + })); + } + void get_history2(td::Slice key, td::Result> r_state, td::Promise promise) { TRY_RESULT_PROMISE(promise, state, std::move(r_state)); diff --git a/utils/CMakeLists.txt b/utils/CMakeLists.txt index 67c48851..2734755c 100644 --- a/utils/CMakeLists.txt +++ b/utils/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 
FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/utils/opcode-timing.cpp b/utils/opcode-timing.cpp index f50d7041..876ba109 100644 --- a/utils/opcode-timing.cpp +++ b/utils/opcode-timing.cpp @@ -11,6 +11,56 @@ #include "td/utils/ScopeGuard.h" #include "td/utils/StringBuilder.h" #include "td/utils/Timer.h" +#include "block.h" +#include "td/utils/filesystem.h" +#include "mc-config.h" + +td::Ref c7; + +void prepare_c7() { + auto now = (td::uint32)td::Clocks::system(); + td::Ref config_root; + auto config_data = td::read_file("config.boc"); + if (config_data.is_ok()) { + LOG(WARNING) << "Reading config from config.boc"; + auto r_cell = vm::std_boc_deserialize(config_data.move_as_ok()); + r_cell.ensure(); + config_root = r_cell.move_as_ok(); + } + + vm::CellBuilder addr; + addr.store_long(4, 3); + addr.store_long(0, 8); + addr.store_ones(256); + std::vector tuple = { + td::make_refint(0x076ef1ea), // [ magic:0x076ef1ea + td::zero_refint(), // actions:Integer + td::zero_refint(), // msgs_sent:Integer + td::make_refint(now), // unixtime:Integer + td::make_refint(0), // block_lt:Integer + td::make_refint(0), // trans_lt:Integer + td::make_refint(123), // rand_seed:Integer + block::CurrencyCollection(td::make_refint(10000LL * 1000000000)) + .as_vm_tuple(), // balance_remaining:[Integer (Maybe Cell)] + addr.as_cellslice_ref(), // myself:MsgAddressInt + vm::StackEntry::maybe(config_root) // global_config:(Maybe Cell) ] = SmartContractInfo; + }; + tuple.push_back({}); // code:Cell + tuple.push_back(block::CurrencyCollection(td::make_refint(2000LL * 1000000000)) + .as_vm_tuple()); // in_msg_value:[Integer (Maybe Cell)] + tuple.push_back(td::make_refint(0)); // storage_fees:Integer + tuple.push_back(vm::StackEntry()); // prev_blocks_info + if (config_root.not_null()) { + block::Config config{config_root}; + config.unpack().ensure(); + tuple.push_back(config.get_unpacked_config_tuple(now)); // unpacked_config_tuple + } else { + tuple.push_back(vm::StackEntry()); + } + tuple.push_back(td::zero_refint()); + auto tuple_ref = td::make_cnt_ref>(std::move(tuple)); + c7 = vm::make_tuple_ref(std::move(tuple_ref)); +} td::Ref to_cell(td::Slice s) { if (s.size() >= 4 && s.substr(0, 4) == "boc:") { @@ -34,9 +84,11 @@ struct runInfo { long long gasUsage; int vmReturnCode; - runInfo() : runtime(0.0), gasUsage(0), vmReturnCode(0) {} - runInfo(long double runtime, long long gasUsage, int vmReturnCode) : - runtime(runtime), gasUsage(gasUsage), vmReturnCode(vmReturnCode) {} + runInfo() : runtime(0.0), gasUsage(0), vmReturnCode(0) { + } + runInfo(long double runtime, long long gasUsage, int vmReturnCode) + : runtime(runtime), gasUsage(gasUsage), vmReturnCode(vmReturnCode) { + } runInfo operator+(const runInfo& addend) const { return {runtime + addend.runtime, gasUsage + addend.gasUsage, vmReturnCode ? 
vmReturnCode : addend.vmReturnCode}; @@ -45,7 +97,7 @@ struct runInfo { runInfo& operator+=(const runInfo& addend) { runtime += addend.runtime; gasUsage += addend.gasUsage; - if(!vmReturnCode && addend.vmReturnCode) { + if (!vmReturnCode && addend.vmReturnCode) { vmReturnCode = addend.vmReturnCode; } return *this; @@ -64,13 +116,12 @@ typedef struct { vm::Stack prepare_stack(td::Slice command) { const auto cell = to_cell(command); - vm::init_op_cp0(); vm::DictionaryBase::get_empty_dictionary(); vm::Stack stack; try { vm::GasLimits gas_limit; - int ret = vm::run_vm_code(vm::load_cell_slice_ref(cell), stack, 0 /*flags*/, nullptr /*data*/, - vm::VmLog{}, nullptr, &gas_limit, {}, {}, nullptr, 4); + int ret = vm::run_vm_code(vm::load_cell_slice_ref(cell), stack, 0 /*flags*/, nullptr /*data*/, vm::VmLog{}, nullptr, + &gas_limit, {}, c7, nullptr, ton::SUPPORTED_VERSION); CHECK(ret == 0); } catch (...) { LOG(FATAL) << "catch unhandled exception"; @@ -80,13 +131,12 @@ vm::Stack prepare_stack(td::Slice command) { runInfo time_run_vm(td::Slice command, td::Ref stack) { const auto cell = to_cell(command); - vm::init_op_cp0(); vm::DictionaryBase::get_empty_dictionary(); CHECK(stack.is_unique()); try { vm::GasLimits gas_limit; - vm::VmState vm{vm::load_cell_slice_ref(cell), std::move(stack), gas_limit, 0, {}, vm::VmLog{}, {}, {}}; - vm.set_global_version(4); + vm::VmState vm{vm::load_cell_slice_ref(cell), std::move(stack), gas_limit, 0, {}, vm::VmLog{}, {}, c7}; + vm.set_global_version(ton::SUPPORTED_VERSION); std::clock_t cStart = std::clock(); int ret = ~vm.run(); std::clock_t cEnd = std::clock(); @@ -104,7 +154,7 @@ runtimeStats averageRuntime(td::Slice command, const vm::Stack& stack) { std::vector values; values.reserve(samples); td::Timer t0; - for(size_t i = 0; i < samples; ++i) { + for (size_t i = 0; i < samples; ++i) { const auto value_empty = time_run_vm(td::Slice(""), td::Ref(true, stack)); const auto value_code = time_run_vm(command, td::Ref(true, stack)); runInfo value{value_code.runtime - value_empty.runtime, value_code.gasUsage - value_empty.gasUsage, @@ -122,18 +172,16 @@ runtimeStats averageRuntime(td::Slice command, const vm::Stack& stack) { long double runtimeDiffSum = 0.0; long double gasDiffSum = 0.0; bool errored = false; - for(const auto value : values) { + for (const auto value : values) { const auto runtime = value.runtime - runtimeMean; const auto gasUsage = static_cast(value.gasUsage) - gasMean; runtimeDiffSum += runtime * runtime; gasDiffSum += gasUsage * gasUsage; errored = errored || value.errored(); } - return { - {runtimeMean, sqrtl(runtimeDiffSum / static_cast(samples))}, - {gasMean, sqrtl(gasDiffSum / static_cast(samples))}, - errored - }; + return {{runtimeMean, sqrtl(runtimeDiffSum / static_cast(samples))}, + {gasMean, sqrtl(gasDiffSum / static_cast(samples))}, + errored}; } runtimeStats timeInstruction(const std::string& setupCode, const std::string& toMeasure) { @@ -143,34 +191,41 @@ runtimeStats timeInstruction(const std::string& setupCode, const std::string& to int main(int argc, char** argv) { SET_VERBOSITY_LEVEL(verbosity_ERROR); - if(argc != 2 && argc != 3) { - std::cerr << - "This utility compares the timing of VM execution against the gas used.\n" - "It can be used to discover opcodes or opcode sequences that consume an " - "inordinate amount of computational resources relative to their gas cost.\n" - "\n" - "The utility expects two command line arguments: \n" - "The TVM code used to set up the stack and VM state followed by the TVM code to measure.\n" - "For 
example, to test the DIVMODC opcode:\n" - "\t$ " << argv[0] << " 80FF801C A90E 2>/dev/null\n" - "\tOPCODE,runtime mean,runtime stddev,gas mean,gas stddev\n" - "\tA90E,0.0066416,0.00233496,26,0\n" - "\n" - "Usage: " << argv[0] << " [TVM_SETUP_BYTECODE] TVM_BYTECODE\n" - "\tBYTECODE is either:\n" - "\t1. hex-encoded string (e.g. A90E for DIVMODC)\n" - "\t2. boc: (e.g. boc:te6ccgEBAgEABwABAogBAAJ7)" << std::endl << std::endl; + if (argc != 2 && argc != 3) { + std::cerr << "This utility compares the timing of VM execution against the gas used.\n" + "It can be used to discover opcodes or opcode sequences that consume an " + "inordinate amount of computational resources relative to their gas cost.\n" + "\n" + "The utility expects two command line arguments: \n" + "The TVM code used to set up the stack and VM state followed by the TVM code to measure.\n" + "For example, to test the DIVMODC opcode:\n" + "\t$ " + << argv[0] + << " 80FF801C A90E 2>/dev/null\n" + "\tOPCODE,runtime mean,runtime stddev,gas mean,gas stddev\n" + "\tA90E,0.0066416,0.00233496,26,0\n" + "\n" + "Usage: " + << argv[0] + << " [TVM_SETUP_BYTECODE] TVM_BYTECODE\n" + "\tBYTECODE is either:\n" + "\t1. hex-encoded string (e.g. A90E for DIVMODC)\n" + "\t2. boc: (e.g. boc:te6ccgEBAgEABwABAogBAAJ7)" + << std::endl + << std::endl; return 1; } std::cout << "OPCODE,runtime mean,runtime stddev,gas mean,gas stddev,error" << std::endl; std::string setup, code; - if(argc == 2) { + if (argc == 2) { setup = ""; code = argv[1]; } else { setup = argv[1]; code = argv[2]; } + vm::init_vm().ensure(); + prepare_c7(); const auto time = timeInstruction(setup, code); std::cout << std::fixed << std::setprecision(9) << code << "," << time.runtime.mean << "," << time.runtime.stddev << "," << time.gasUsage.mean << "," << time.gasUsage.stddev << "," << (int)time.errored << std::endl; diff --git a/validator-engine-console/CMakeLists.txt b/validator-engine-console/CMakeLists.txt index 48716960..634a5b8b 100644 --- a/validator-engine-console/CMakeLists.txt +++ b/validator-engine-console/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) add_executable (validator-engine-console validator-engine-console.cpp validator-engine-console.h validator-engine-console-query.cpp diff --git a/validator-engine/CMakeLists.txt b/validator-engine/CMakeLists.txt index d369a2c3..5df720fe 100644 --- a/validator-engine/CMakeLists.txt +++ b/validator-engine/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator-engine/validator-engine.cpp b/validator-engine/validator-engine.cpp index 93bd23be..a4cc65fa 100644 --- a/validator-engine/validator-engine.cpp +++ b/validator-engine/validator-engine.cpp @@ -38,7 +38,7 @@ #include "common/errorlog.h" -#include "crypto/vm/cp0.h" +#include "crypto/vm/vm.h" #include "crypto/fift/utils.h" #include "td/utils/filesystem.h" @@ -1423,6 +1423,7 @@ td::Status ValidatorEngine::load_global_config() { if (!session_logs_file_.empty()) { validator_options_.write().set_session_logs_file(session_logs_file_); } + validator_options_.write().set_celldb_compress_depth(celldb_compress_depth_); std::vector h; for (auto &x : conf.validator_->hardforks_) { @@ -1680,6 +1681,12 @@ void ValidatorEngine::load_config(td::Promise promise) { config_file_ = db_root_ + "/config.json"; } auto conf_data_R = td::read_file(config_file_); + if 
(conf_data_R.is_error()) { + conf_data_R = td::read_file(temp_config_file()); + if (conf_data_R.is_ok()) { + td::rename(temp_config_file(), config_file_).ensure(); + } + } if (conf_data_R.is_error()) { auto P = td::PromiseCreator::lambda( [name = local_config_, new_name = config_file_, promise = std::move(promise)](td::Result R) { @@ -1728,12 +1735,15 @@ void ValidatorEngine::load_config(td::Promise promise) { void ValidatorEngine::write_config(td::Promise promise) { auto s = td::json_encode(td::ToJson(*config_.tl().get()), true); - auto S = td::write_file(config_file_, s); - if (S.is_ok()) { - promise.set_value(td::Unit()); - } else { + auto S = td::write_file(temp_config_file(), s); + if (S.is_error()) { + td::unlink(temp_config_file()).ignore(); promise.set_error(std::move(S)); + return; } + td::unlink(config_file_).ignore(); + TRY_STATUS_PROMISE(promise, td::rename(temp_config_file(), config_file_)); + promise.set_value(td::Unit()); } td::Promise ValidatorEngine::get_key_promise(td::MultiPromise::InitGuard &ig) { @@ -3501,6 +3511,19 @@ void ValidatorEngine::run_control_query(ton::ton_api::engine_validator_getShardO promise.set_value(create_control_query_error(td::Status::Error(ton::ErrorCode::notready, "no such block"))); return; } + if (!dest) { + td::actor::send_closure( + manager, &ton::validator::ValidatorManagerInterface::get_out_msg_queue_size, handle->id(), + [promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + promise.set_value(create_control_query_error(R.move_as_error_prefix("failed to get queue size: "))); + } else { + promise.set_value(ton::create_serialize_tl_object( + R.move_as_ok())); + } + }); + return; + } td::actor::send_closure( manager, &ton::validator::ValidatorManagerInterface::get_shard_state_from_db, handle, [=, promise = std::move(promise)](td::Result> R) mutable { @@ -4060,6 +4083,15 @@ int main(int argc, char *argv[]) { acts.push_back([&x, at]() { td::actor::send_closure(x, &ValidatorEngine::schedule_shutdown, (double)at); }); return td::Status::OK(); }); + p.add_checked_option('\0', "celldb-compress-depth", + "optimize celldb by storing cells of depth X with whole subtrees (experimental, default: 0)", + [&](td::Slice arg) { + TRY_RESULT(value, td::to_integer_safe(arg)); + acts.push_back([&x, value]() { + td::actor::send_closure(x, &ValidatorEngine::set_celldb_compress_depth, value); + }); + return td::Status::OK(); + }); auto S = p.run(argc, argv); if (S.is_error()) { LOG(ERROR) << "failed to parse options: " << S.move_as_error(); @@ -4073,7 +4105,7 @@ int main(int argc, char *argv[]) { td::actor::Scheduler scheduler({threads}); scheduler.run_in_context([&] { - CHECK(vm::init_op_cp0()); + vm::init_vm().ensure(); x = td::actor::create_actor("validator-engine"); for (auto &act : acts) { act(); diff --git a/validator-engine/validator-engine.hpp b/validator-engine/validator-engine.hpp index 22e658f5..93d39e4f 100644 --- a/validator-engine/validator-engine.hpp +++ b/validator-engine/validator-engine.hpp @@ -1,4 +1,4 @@ -/* +/* This file is part of TON Blockchain source code. 
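The write_config()/load_config() changes above switch the engine to a write-to-temp-then-rename scheme: the JSON is written to config.json.tmp, the old config is unlinked, and the temp file is renamed into place, while load_config() falls back to the temp file if only the rename was missed. A rough standalone sketch of that pattern using only the standard library (the engine itself goes through td::write_file/td::unlink/td::rename and promises):

#include <cstdio>
#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

// Writes `contents` to `path` via a sibling ".tmp" file so that a crash
// mid-write cannot leave a truncated config behind.
static bool write_file_atomically(const std::string &path, const std::string &contents) {
  const std::string tmp = path + ".tmp";
  {
    std::ofstream out(tmp, std::ios::binary | std::ios::trunc);
    out << contents;
    if (!out.flush()) {
      std::remove(tmp.c_str());  // mirrors the unlink-on-error branch
      return false;
    }
  }
  std::error_code ec;
  fs::remove(path, ec);       // drop the old file first, as the patch does with td::unlink
  fs::rename(tmp, path, ec);  // on POSIX the rename replaces the destination atomically
  return !ec;
}

int main() {
  if (!write_file_atomically("config.json", "{\"example\": true}\n")) {
    std::cerr << "write failed\n";
    return 1;
  }
  std::cout << "config written\n";
  return 0;
}

Keeping the fallback read of the ".tmp" file in load_config() covers the window where the temp file was fully written but the final rename never happened.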
TON Blockchain is free software; you can redistribute it and/or @@ -166,6 +166,9 @@ class ValidatorEngine : public td::actor::Actor { std::string local_config_ = ""; std::string global_config_ = "ton-global.config"; std::string config_file_; + std::string temp_config_file() const { + return config_file_ + ".tmp"; + } std::string fift_dir_ = ""; @@ -214,6 +217,7 @@ class ValidatorEngine : public td::actor::Actor { double sync_ttl_ = 0; double archive_ttl_ = 0; double key_proof_ttl_ = 0; + td::uint32 celldb_compress_depth_ = 0; bool read_config_ = false; bool started_keyring_ = false; bool started_ = false; @@ -274,6 +278,9 @@ class ValidatorEngine : public td::actor::Actor { keys_[key.compute_short_id()] = key; } void schedule_shutdown(double at); + void set_celldb_compress_depth(td::uint32 value) { + celldb_compress_depth_ = value; + } void set_not_all_shards() { not_all_shards_ = true; } diff --git a/validator-session/CMakeLists.txt b/validator-session/CMakeLists.txt index 4931e464..c769f4d8 100644 --- a/validator-session/CMakeLists.txt +++ b/validator-session/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator-session/validator-session-description.hpp b/validator-session/validator-session-description.hpp index f4e1e683..0fbb4ede 100644 --- a/validator-session/validator-session-description.hpp +++ b/validator-session/validator-session-description.hpp @@ -58,6 +58,7 @@ class ValidatorSessionDescriptionImpl : public ValidatorSessionDescription { }; std::array, cache_size> cache_; + public: class MemPool { public: explicit MemPool(size_t chunk_size); @@ -71,6 +72,8 @@ class ValidatorSessionDescriptionImpl : public ValidatorSessionDescription { std::vector data_; size_t ptr_ = 0; }; + + private: MemPool mem_perm_ = MemPool(mem_chunk_size_perm); MemPool mem_temp_ = MemPool(mem_chunk_size_temp); diff --git a/validator-session/validator-session-types.h b/validator-session/validator-session-types.h index 57957478..bcbaa8f7 100644 --- a/validator-session/validator-session-types.h +++ b/validator-session/validator-session-types.h @@ -74,8 +74,10 @@ struct ValidatorSessionStats { struct Producer { PublicKeyHash id = PublicKeyHash::zero(); + ValidatorSessionCandidateId candidate_id = ValidatorSessionCandidateId::zero(); int block_status = status_none; td::uint64 block_timestamp = 0; + std::string comment; }; struct Round { td::uint64 timestamp = 0; @@ -85,6 +87,9 @@ struct ValidatorSessionStats { td::uint32 first_round; std::vector rounds; + bool success = false; + ValidatorSessionId session_id = ValidatorSessionId::zero(); + CatchainSeqno cc_seqno = 0; td::uint64 timestamp = 0; PublicKeyHash self = PublicKeyHash::zero(); PublicKeyHash creator = PublicKeyHash::zero(); diff --git a/validator-session/validator-session.cpp b/validator-session/validator-session.cpp index dc2aff49..e262c6f6 100644 --- a/validator-session/validator-session.cpp +++ b/validator-session/validator-session.cpp @@ -289,7 +289,7 @@ void ValidatorSessionImpl::process_broadcast(PublicKeyHash src, td::BufferSlice CHECK(!pending_reject_.count(block_id)); CHECK(!rejected_.count(block_id)); - stats_set_candidate_status(cur_round_, src, ValidatorSessionStats::status_received); + stats_set_candidate_status(cur_round_, src, block_id, ValidatorSessionStats::status_received); auto v = virtual_state_->choose_blocks_to_approve(description(), local_idx()); for (auto &b : v) { if (b && 
SentBlock::get_block_id(b) == block_id) { @@ -362,7 +362,8 @@ void ValidatorSessionImpl::process_query(PublicKeyHash src, td::BufferSlice data void ValidatorSessionImpl::candidate_decision_fail(td::uint32 round, ValidatorSessionCandidateId hash, std::string result, td::uint32 src, td::BufferSlice proof) { - stats_set_candidate_status(round, description().get_source_id(src), ValidatorSessionStats::status_rejected); + stats_set_candidate_status(round, description().get_source_id(src), hash, ValidatorSessionStats::status_rejected, + result); if (round != cur_round_) { return; } @@ -377,7 +378,8 @@ void ValidatorSessionImpl::candidate_decision_fail(td::uint32 round, ValidatorSe void ValidatorSessionImpl::candidate_decision_ok(td::uint32 round, ValidatorSessionCandidateId hash, RootHash root_hash, FileHash file_hash, td::uint32 src, td::uint32 ok_from) { - stats_set_candidate_status(round, description().get_source_id(src), ValidatorSessionStats::status_approved); + stats_set_candidate_status(round, description().get_source_id(src), hash, ValidatorSessionStats::status_approved, + PSTRING() << "ts=" << ok_from); if (round != cur_round_) { return; } @@ -813,15 +815,13 @@ void ValidatorSessionImpl::on_new_round(td::uint32 round) { if (!have_block) { callback_->on_block_skipped(cur_round_); } else { + cur_stats_.success = true; cur_stats_.timestamp = (td::uint64)td::Clocks::system(); - cur_stats_.total_validators = description().get_total_nodes(); - cur_stats_.total_weight = description().get_total_weight(); cur_stats_.signatures = (td::uint32)export_sigs.size(); cur_stats_.signatures_weight = signatures_weight; cur_stats_.approve_signatures = (td::uint32)export_approve_sigs.size(); cur_stats_.approve_signatures_weight = approve_signatures_weight; cur_stats_.creator = description().get_source_id(block->get_src_idx()); - cur_stats_.self = description().get_source_id(local_idx()); if (it == blocks_.end()) { callback_->on_block_committed(cur_round_, description().get_source_public_key(block->get_src_idx()), @@ -924,6 +924,12 @@ void ValidatorSessionImpl::destroy() { stop(); } +void ValidatorSessionImpl::get_current_stats(td::Promise promise) { + ValidatorSessionStats stats = cur_stats_; + stats.timestamp = (td::uint64)td::Clocks::system(); + promise.set_result(std::move(stats)); +} + void ValidatorSessionImpl::start_up() { CHECK(!rldp_.empty()); cur_round_ = 0; @@ -942,6 +948,10 @@ void ValidatorSessionImpl::start_up() { void ValidatorSessionImpl::stats_init() { cur_stats_ = ValidatorSessionStats(); cur_stats_.first_round = cur_round_; + cur_stats_.session_id = unique_hash_; + cur_stats_.total_validators = description().get_total_nodes(); + cur_stats_.total_weight = description().get_total_weight(); + cur_stats_.self = description().get_source_id(local_idx()); stats_add_round(); } @@ -962,20 +972,26 @@ void ValidatorSessionImpl::stats_add_round() { } } -void ValidatorSessionImpl::stats_set_candidate_status(td::uint32 round, PublicKeyHash src, int status) { +void ValidatorSessionImpl::stats_set_candidate_status(td::uint32 round, PublicKeyHash src, + ValidatorSessionCandidateId candidate_id, int status, + std::string comment) { if (round < cur_stats_.first_round || round - cur_stats_.first_round >= cur_stats_.rounds.size()) { return; } - auto& stats_round = cur_stats_.rounds[round - cur_stats_.first_round]; + auto &stats_round = cur_stats_.rounds[round - cur_stats_.first_round]; auto it = std::find_if(stats_round.producers.begin(), stats_round.producers.end(), - [&](const ValidatorSessionStats::Producer& 
p) { return p.id == src; }); + [&](const ValidatorSessionStats::Producer &p) { return p.id == src; }); if (it == stats_round.producers.end()) { return; } + it->candidate_id = candidate_id; if (it->block_status == ValidatorSessionStats::status_none) { it->block_timestamp = (td::uint64)td::Clocks::system(); } it->block_status = status; + if (!comment.empty()) { + it->comment = std::move(comment); + } } void ValidatorSessionImpl::get_session_info( diff --git a/validator-session/validator-session.h b/validator-session/validator-session.h index fed75020..f04a1628 100644 --- a/validator-session/validator-session.h +++ b/validator-session/validator-session.h @@ -91,6 +91,7 @@ class ValidatorSession : public td::actor::Actor { virtual void start() = 0; virtual void destroy() = 0; + virtual void get_current_stats(td::Promise promise) = 0; virtual void get_session_info(td::Promise> promise) = 0; diff --git a/validator-session/validator-session.hpp b/validator-session/validator-session.hpp index 741be11b..2c2d15c2 100644 --- a/validator-session/validator-session.hpp +++ b/validator-session/validator-session.hpp @@ -160,7 +160,8 @@ class ValidatorSessionImpl : public ValidatorSession { ValidatorSessionStats cur_stats_; void stats_init(); void stats_add_round(); - void stats_set_candidate_status(td::uint32 round, PublicKeyHash src, int status); + void stats_set_candidate_status(td::uint32 round, PublicKeyHash src, ValidatorSessionCandidateId candidate_id, + int status, std::string comment = ""); void get_session_info(td::Promise> promise) override; @@ -175,6 +176,7 @@ class ValidatorSessionImpl : public ValidatorSession { void start() override; void destroy() override; + void get_current_stats(td::Promise promise) override; void process_blocks(std::vector blocks); void finished_processing(); diff --git a/validator/CMakeLists.txt b/validator/CMakeLists.txt index b67a8c5d..aa02247b 100644 --- a/validator/CMakeLists.txt +++ b/validator/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) @@ -53,6 +53,7 @@ set(VALIDATOR_HEADERS invariants.hpp import-db-slice.hpp + queue-size-counter.hpp collator-node.hpp manager-disk.h @@ -80,6 +81,7 @@ set(VALIDATOR_SOURCE validator-full-id.cpp validator-group.cpp validator-options.cpp + queue-size-counter.cpp downloaders/wait-block-data.cpp downloaders/wait-block-state.cpp @@ -101,7 +103,8 @@ set(DISK_VALIDATOR_SOURCE validator-full-id.cpp validator-group.cpp validator-options.cpp - + queue-size-counter.cpp + downloaders/wait-block-data-disk.cpp downloaders/wait-block-state.cpp downloaders/wait-block-state-merge.cpp @@ -120,7 +123,8 @@ set(HARDFORK_VALIDATOR_SOURCE validator-full-id.cpp validator-group.cpp validator-options.cpp - + queue-size-counter.cpp + downloaders/wait-block-data-disk.cpp downloaders/wait-block-state.cpp downloaders/wait-block-state-merge.cpp diff --git a/validator/db/celldb.cpp b/validator/db/celldb.cpp index 3b2a34f3..d29126ce 100644 --- a/validator/db/celldb.cpp +++ b/validator/db/celldb.cpp @@ -23,6 +23,7 @@ #include "ton/ton-tl.hpp" #include "ton/ton-io.hpp" +#include "common/delay.h" namespace ton { @@ -62,16 +63,31 @@ void CellDbBase::execute_sync(std::function f) { f(); } -CellDbIn::CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path) - : root_db_(root_db), parent_(parent), path_(std::move(path)) { +CellDbIn::CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string 
path, + td::Ref opts) + : root_db_(root_db), parent_(parent), path_(std::move(path)), opts_(opts) { } void CellDbIn::start_up() { + on_load_callback_ = [actor = std::make_shared>( + td::actor::create_actor("celldbmigration", actor_id(this))), + compress_depth = opts_->get_celldb_compress_depth()](const vm::CellLoader::LoadResult& res) { + if (res.cell_.is_null()) { + return; + } + bool expected_stored_boc = res.cell_->get_depth() == compress_depth && compress_depth != 0; + if (expected_stored_boc != res.stored_boc_) { + td::actor::send_closure(*actor, &CellDbIn::MigrationProxy::migrate_cell, + td::Bits256{res.cell_->get_hash().bits()}); + } + }; + CellDbBase::start_up(); cell_db_ = std::make_shared(td::RocksDb::open(path_).move_as_ok()); boc_ = vm::DynamicBagOfCellsDb::create(); - boc_->set_loader(std::make_unique(cell_db_->snapshot())).ensure(); + boc_->set_celldb_compress_depth(opts_->get_celldb_compress_depth()); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); alarm_timestamp() = td::Timestamp::in(10.0); @@ -129,7 +145,7 @@ void CellDbIn::store_cell(BlockIdExt block_id, td::Ref cell, td::Promi set_block(key_hash, std::move(D)); cell_db_->commit_write_batch().ensure(); - boc_->set_loader(std::make_unique(cell_db_->snapshot())).ensure(); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); promise.set_result(boc_->load_cell(cell->get_hash().as_slice())); @@ -140,6 +156,16 @@ void CellDbIn::get_cell_db_reader(td::Promise> } void CellDbIn::alarm() { + if (migrate_after_ && migrate_after_.is_in_past()) { + migrate_cells(); + } + if (migration_stats_ && migration_stats_->end_at_.is_in_past()) { + LOG(INFO) << "CellDb migration, " << migration_stats_->start_.elapsed() + << "s stats: batches=" << migration_stats_->batches_ << " migrated=" << migration_stats_->migrated_cells_ + << " checked=" << migration_stats_->checked_cells_ << " time=" << migration_stats_->total_time_ + << " queue_size=" << cells_to_migrate_.size(); + migration_stats_ = {}; + } auto E = get_block(get_empty_key_hash()).move_as_ok(); auto N = get_block(E.next).move_as_ok(); if (N.is_empty()) { @@ -220,7 +246,7 @@ void CellDbIn::gc_cont2(BlockHandle handle) { cell_db_->commit_write_batch().ensure(); alarm_timestamp() = td::Timestamp::now(); - boc_->set_loader(std::make_unique(cell_db_->snapshot())).ensure(); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); DCHECK(get_block(key_hash).is_error()); @@ -273,6 +299,66 @@ void CellDbIn::set_block(KeyHash key_hash, DbEntry e) { cell_db_->set(td::as_slice(key), e.release()).ensure(); } +void CellDbIn::migrate_cell(td::Bits256 hash) { + cells_to_migrate_.insert(hash); + if (!migration_active_) { + migration_active_ = true; + migrate_after_ = td::Timestamp::in(10.0); + } +} + +void CellDbIn::migrate_cells() { + migrate_after_ = td::Timestamp::never(); + if (cells_to_migrate_.empty()) { + migration_active_ = false; + return; + } + td::Timer timer; + if (!migration_stats_) { + migration_stats_ = std::make_unique(); + } + vm::CellStorer stor{*cell_db_}; + auto loader = std::make_unique(cell_db_->snapshot()); + boc_->set_loader(std::make_unique(*loader)).ensure(); + cell_db_->begin_write_batch().ensure(); + td::uint32 checked = 0, 
migrated = 0; + for (auto it = cells_to_migrate_.begin(); it != cells_to_migrate_.end() && checked < 128; ) { + ++checked; + td::Bits256 hash = *it; + it = cells_to_migrate_.erase(it); + auto R = loader->load(hash.as_slice(), true, boc_->as_ext_cell_creator()); + if (R.is_error()) { + continue; + } + if (R.ok().status == vm::CellLoader::LoadResult::NotFound) { + continue; + } + bool expected_stored_boc = + R.ok().cell_->get_depth() == opts_->get_celldb_compress_depth() && opts_->get_celldb_compress_depth() != 0; + if (expected_stored_boc != R.ok().stored_boc_) { + ++migrated; + stor.set(R.ok().refcnt(), R.ok().cell_, expected_stored_boc).ensure(); + } + } + cell_db_->commit_write_batch().ensure(); + boc_->set_loader(std::make_unique(cell_db_->snapshot(), on_load_callback_)).ensure(); + td::actor::send_closure(parent_, &CellDb::update_snapshot, cell_db_->snapshot()); + + double time = timer.elapsed(); + LOG(DEBUG) << "CellDb migration: migrated=" << migrated << " checked=" << checked << " time=" << time; + ++migration_stats_->batches_; + migration_stats_->migrated_cells_ += migrated; + migration_stats_->checked_cells_ += checked; + migration_stats_->total_time_ += time; + + if (cells_to_migrate_.empty()) { + migration_active_ = false; + } else { + delay_action([SelfId = actor_id(this)] { td::actor::send_closure(SelfId, &CellDbIn::migrate_cells); }, + td::Timestamp::in(time * 2)); + } +} + void CellDb::load_cell(RootHash hash, td::Promise> promise) { if (!started_) { td::actor::send_closure(cell_db_, &CellDbIn::load_cell, hash, std::move(promise)); @@ -300,7 +386,20 @@ void CellDb::get_cell_db_reader(td::Promise> p void CellDb::start_up() { CellDbBase::start_up(); boc_ = vm::DynamicBagOfCellsDb::create(); - cell_db_ = td::actor::create_actor("celldbin", root_db_, actor_id(this), path_); + boc_->set_celldb_compress_depth(opts_->get_celldb_compress_depth()); + cell_db_ = td::actor::create_actor("celldbin", root_db_, actor_id(this), path_, opts_); + on_load_callback_ = [actor = std::make_shared>( + td::actor::create_actor("celldbmigration", cell_db_.get())), + compress_depth = opts_->get_celldb_compress_depth()](const vm::CellLoader::LoadResult& res) { + if (res.cell_.is_null()) { + return; + } + bool expected_stored_boc = res.cell_->get_depth() == compress_depth && compress_depth != 0; + if (expected_stored_boc != res.stored_boc_) { + td::actor::send_closure(*actor, &CellDbIn::MigrationProxy::migrate_cell, + td::Bits256{res.cell_->get_hash().bits()}); + } + }; } CellDbIn::DbEntry::DbEntry(tl_object_ptr entry) diff --git a/validator/db/celldb.hpp b/validator/db/celldb.hpp index a05e9ddb..a2a84ab4 100644 --- a/validator/db/celldb.hpp +++ b/validator/db/celldb.hpp @@ -25,6 +25,7 @@ #include "ton/ton-types.h" #include "interfaces/block-handle.h" #include "auto/tl/ton_api.h" +#include "validator.h" namespace ton { @@ -53,7 +54,10 @@ class CellDbIn : public CellDbBase { void store_cell(BlockIdExt block_id, td::Ref cell, td::Promise> promise); void get_cell_db_reader(td::Promise> promise); - CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path); + void migrate_cell(td::Bits256 hash); + + CellDbIn(td::actor::ActorId root_db, td::actor::ActorId parent, std::string path, + td::Ref opts); void start_up() override; void alarm() override; @@ -89,13 +93,44 @@ class CellDbIn : public CellDbBase { void gc_cont2(BlockHandle handle); void skip_gc(); + void migrate_cells(); + td::actor::ActorId root_db_; td::actor::ActorId parent_; std::string path_; + td::Ref opts_; std::unique_ptr 
boc_; std::shared_ptr cell_db_; + + std::function on_load_callback_; + std::set cells_to_migrate_; + td::Timestamp migrate_after_ = td::Timestamp::never(); + bool migration_active_ = false; + + struct MigrationStats { + td::Timer start_; + td::Timestamp end_at_ = td::Timestamp::in(60.0); + size_t batches_ = 0; + size_t migrated_cells_ = 0; + size_t checked_cells_ = 0; + double total_time_ = 0.0; + }; + std::unique_ptr migration_stats_; + + public: + class MigrationProxy : public td::actor::Actor { + public: + explicit MigrationProxy(td::actor::ActorId cell_db) : cell_db_(cell_db) { + } + void migrate_cell(td::Bits256 hash) { + td::actor::send_closure(cell_db_, &CellDbIn::migrate_cell, hash); + } + + private: + td::actor::ActorId cell_db_; + }; }; class CellDb : public CellDbBase { @@ -104,11 +139,12 @@ class CellDb : public CellDbBase { void store_cell(BlockIdExt block_id, td::Ref cell, td::Promise> promise); void update_snapshot(std::unique_ptr snapshot) { started_ = true; - boc_->set_loader(std::make_unique(std::move(snapshot))).ensure(); + boc_->set_loader(std::make_unique(std::move(snapshot), on_load_callback_)).ensure(); } void get_cell_db_reader(td::Promise> promise); - CellDb(td::actor::ActorId root_db, std::string path) : root_db_(root_db), path_(path) { + CellDb(td::actor::ActorId root_db, std::string path, td::Ref opts) + : root_db_(root_db), path_(path), opts_(opts) { } void start_up() override; @@ -116,11 +152,14 @@ class CellDb : public CellDbBase { private: td::actor::ActorId root_db_; std::string path_; + td::Ref opts_; td::actor::ActorOwn cell_db_; std::unique_ptr boc_; bool started_ = false; + + std::function on_load_callback_; }; } // namespace validator diff --git a/validator/db/rootdb.cpp b/validator/db/rootdb.cpp index 1c8c8e59..9be708c4 100644 --- a/validator/db/rootdb.cpp +++ b/validator/db/rootdb.cpp @@ -397,7 +397,7 @@ void RootDb::get_hardforks(td::Promise> promise) { } void RootDb::start_up() { - cell_db_ = td::actor::create_actor("celldb", actor_id(this), root_path_ + "/celldb/"); + cell_db_ = td::actor::create_actor("celldb", actor_id(this), root_path_ + "/celldb/", opts_); state_db_ = td::actor::create_actor("statedb", actor_id(this), root_path_ + "/state/"); static_files_db_ = td::actor::create_actor("staticfilesdb", actor_id(this), root_path_ + "/static/"); archive_db_ = td::actor::create_actor("archive", actor_id(this), root_path_); diff --git a/validator/db/rootdb.hpp b/validator/db/rootdb.hpp index 6206bbfe..e74c636e 100644 --- a/validator/db/rootdb.hpp +++ b/validator/db/rootdb.hpp @@ -26,6 +26,7 @@ #include "statedb.hpp" #include "staticfilesdb.hpp" #include "archive-manager.hpp" +#include "validator.h" namespace ton { @@ -34,8 +35,9 @@ namespace validator { class RootDb : public Db { public: enum class Flags : td::uint32 { f_started = 1, f_ready = 2, f_switched = 4, f_archived = 8 }; - RootDb(td::actor::ActorId validator_manager, std::string root_path) - : validator_manager_(validator_manager), root_path_(std::move(root_path)) { + RootDb(td::actor::ActorId validator_manager, std::string root_path, + td::Ref opts) + : validator_manager_(validator_manager), root_path_(std::move(root_path)), opts_(opts) { } void start_up() override; @@ -141,6 +143,7 @@ class RootDb : public Db { td::actor::ActorId validator_manager_; std::string root_path_; + td::Ref opts_; td::actor::ActorOwn cell_db_; td::actor::ActorOwn state_db_; diff --git a/validator/fabric.h b/validator/fabric.h index bd26b0e3..8e544f8a 100644 --- a/validator/fabric.h +++ b/validator/fabric.h @@ 
-20,6 +20,7 @@ #include "interfaces/validator-manager.h" #include "interfaces/db.h" +#include "validator.h" namespace ton { @@ -28,7 +29,8 @@ namespace validator { enum ValidateMode { fake = 1, full_collated_data = 2 }; enum CollateMode { skip_store_candidate = 1 }; -td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_); +td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_, + td::Ref opts); td::actor::ActorOwn create_liteserver_cache_actor(td::actor::ActorId manager, std::string db_root); diff --git a/validator/impl/CMakeLists.txt b/validator/impl/CMakeLists.txt index 4e3fd04f..6f8ae518 100644 --- a/validator/impl/CMakeLists.txt +++ b/validator/impl/CMakeLists.txt @@ -1,4 +1,4 @@ -cmake_minimum_required(VERSION 3.0.2 FATAL_ERROR) +cmake_minimum_required(VERSION 3.5 FATAL_ERROR) if (NOT OPENSSL_FOUND) find_package(OpenSSL REQUIRED) diff --git a/validator/impl/collator-impl.h b/validator/impl/collator-impl.h index 10953a97..9fc36c27 100644 --- a/validator/impl/collator-impl.h +++ b/validator/impl/collator-impl.h @@ -60,7 +60,6 @@ class Collator final : public td::actor::Actor { bool preinit_complete{false}; bool is_key_block_{false}; bool block_full_{false}; - bool outq_cleanup_partial_{false}; bool inbound_queues_empty_{false}; bool libraries_changed_{false}; bool prev_key_block_exists_{false}; @@ -192,6 +191,7 @@ class Collator final : public td::actor::Actor { std::pair last_proc_int_msg_, first_unproc_int_msg_; std::unique_ptr in_msg_dict, out_msg_dict, old_out_msg_queue_, out_msg_queue_, sibling_out_msg_queue_; + td::uint32 out_msg_queue_size_ = 0; std::unique_ptr ihr_pending; std::shared_ptr processed_upto_, sibling_processed_upto_; std::unique_ptr block_create_stats_; @@ -210,7 +210,7 @@ class Collator final : public td::actor::Actor { // block::Account* lookup_account(td::ConstBitPtr addr) const; std::unique_ptr make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra, bool force_create = false); + bool force_create); td::Result make_account(td::ConstBitPtr addr, bool force_create = false); td::actor::ActorId get_self() { return actor_id(this); @@ -235,6 +235,7 @@ class Collator final : public td::actor::Actor { bool fix_processed_upto(block::MsgProcessedUptoCollection& upto); void got_neighbor_msg_queues(td::Result>> R); void got_neighbor_msg_queue(unsigned i, Ref res); + void got_out_queue_size(size_t i, td::Result res); bool adjust_shard_config(); bool store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees, const block::CurrencyCollection& created); @@ -253,7 +254,7 @@ class Collator final : public td::actor::Actor { Ref& in_msg); bool create_ticktock_transactions(int mask); bool create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask); - Ref create_ordinary_transaction(Ref msg_root); + Ref create_ordinary_transaction(Ref msg_root, bool is_special_tx = false); bool check_cur_validator_set(); bool unpack_last_mc_state(); bool unpack_last_state(); @@ -269,6 +270,7 @@ class Collator final : public td::actor::Actor { bool check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev); bool check_this_shard_mc_info(); bool request_neighbor_msg_queues(); + bool request_out_msg_queue_size(); void update_max_lt(ton::LogicalTime lt); bool is_masterchain() const { return shard_.is_masterchain(); @@ -279,10 +281,6 @@ class Collator final : public td::actor::Actor { void after_get_external_messages(td::Result>> res); td::Result 
register_external_message_cell(Ref ext_msg, const ExtMessage::Hash& ext_hash);
   // td::Result register_external_message(td::Slice ext_msg_boc);
-  td::Result register_ihr_message_cell(Ref ihr_msg);
-  td::Result register_ihr_message(td::Slice ihr_msg_boc);
-  td::Result register_shard_signatures_cell(Ref shard_blk_signatures);
-  td::Result register_shard_signatures(td::Slice shard_blk_signatures_boc);
   void register_new_msg(block::NewOutMsg msg);
   void register_new_msgs(block::transaction::Transaction& trans);
   bool process_new_messages(bool enqueue_only = false);
@@ -296,7 +294,7 @@ class Collator final : public td::actor::Actor {
   bool enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt);
   bool enqueue_transit_message(Ref msg, Ref old_msg_env, ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix, ton::AccountIdPrefixFull dest_prefix,
-                               td::RefInt256 fwd_fee_remaining, ton::LogicalTime enqueued_lt);
+                               td::RefInt256 fwd_fee_remaining);
   bool delete_out_msg_queue_msg(td::ConstBitPtr key);
   bool insert_in_msg(Ref in_msg);
   bool insert_out_msg(Ref out_msg);
diff --git a/validator/impl/collator.cpp b/validator/impl/collator.cpp
index 7cf14eec..50e01fd7 100644
--- a/validator/impl/collator.cpp
+++ b/validator/impl/collator.cpp
@@ -44,6 +44,12 @@ namespace validator {
 using td::Ref;
 using namespace std::literals::string_literals;
+// Don't increase MERGE_MAX_QUEUE_SIZE too much: merging requires cleaning the whole queue in out_msg_queue_cleanup
+static const td::uint32 FORCE_SPLIT_QUEUE_SIZE = 4096;
+static const td::uint32 SPLIT_MAX_QUEUE_SIZE = 100000;
+static const td::uint32 MERGE_MAX_QUEUE_SIZE = 2047;
+static const td::uint32 SKIP_EXTERNALS_QUEUE_SIZE = 8000;
+
 #define DBG(__n) dbg(__n)&&
 #define DSTART int __dcnt = 0;
 #define DEB DBG(++__dcnt)
@@ -54,6 +60,19 @@ static inline bool dbg(int c) {
   return true;
 }
+/**
+ * Constructs a Collator object.
+ *
+ * @param shard The shard of the new block.
+ * @param is_hardfork A boolean indicating whether the new block is a hardfork.
+ * @param min_masterchain_block_id The minimum reference masterchain block.
+ * @param prev A vector of BlockIdExt representing the previous blocks.
+ * @param validator_set A reference to the ValidatorSet.
+ * @param collator_id The public key of the block creator.
+ * @param manager The ActorId of the ValidatorManager.
+ * @param timeout The timeout for the collator.
+ * @param promise The promise to return the result.
+ */
 Collator::Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_masterchain_block_id, std::vector prev, td::Ref validator_set, Ed25519_PublicKey collator_id, td::actor::ActorId manager, td::Timestamp timeout,
@@ -77,8 +96,16 @@ Collator::Collator(ShardIdFull shard, bool is_hardfork, BlockIdExt min_mastercha
    }) {
 }
+/**
+ * Starts the Collator.
+ *
+ * This function initializes the Collator by performing various checks and queries to the ValidatorManager.
+ * It checks the validity of the shard, the previous blocks, and the workchain.
+ * If all checks pass, it proceeds to query the ValidatorManager for the top masterchain state block, shard states, block data, external messages, and shard blocks.
+ * The results of these queries are handled by corresponding callback functions.
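The four queue-size constants introduced above bound how the collator reacts to a growing outbound message queue: force a shard split, still permit a split or a merge, or stop accepting external messages. The code that consumes them is outside this hunk, so the sketch below is only an illustration of threshold gating of this kind, with hypothetical names and deliberately simplified logic rather than the collator's real decision code:

#include <cstdint>
#include <iostream>

// Limits mirroring the constants added in collator.cpp.
constexpr std::uint32_t kForceSplitQueueSize    = 4096;
constexpr std::uint32_t kSplitMaxQueueSize      = 100000;
constexpr std::uint32_t kMergeMaxQueueSize      = 2047;
constexpr std::uint32_t kSkipExternalsQueueSize = 8000;

// Hypothetical helper, not TON code: classifies one outbound-queue size.
struct QueuePolicy {
  bool want_split;        // queue is big enough that splitting the shard looks desirable
  bool split_allowed;     // queue is still small enough for a split to be feasible
  bool merge_allowed;     // queue is small enough for a merge to be feasible
  bool accept_externals;  // external messages are still taken into the block
};

QueuePolicy classify(std::uint32_t out_msg_queue_size) {
  return QueuePolicy{
      out_msg_queue_size >= kForceSplitQueueSize,
      out_msg_queue_size <= kSplitMaxQueueSize,
      out_msg_queue_size <= kMergeMaxQueueSize,
      out_msg_queue_size < kSkipExternalsQueueSize,
  };
}

int main() {
  for (std::uint32_t size : {100u, 5000u, 9000u, 200000u}) {
    QueuePolicy p = classify(size);
    std::cout << "queue=" << size << " want_split=" << p.want_split << " split_ok=" << p.split_allowed
              << " merge_ok=" << p.merge_allowed << " externals=" << p.accept_externals << "\n";
  }
  return 0;
}

The comment accompanying the constants is the important part: merging two shards requires cleaning the whole combined queue, which is why the merge limit is kept far below the split limit.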
+ */ void Collator::start_up() { - LOG(INFO) << "Collator for shard " << shard_.to_str() << " started"; + LOG(WARNING) << "Collator for shard " << shard_.to_str() << " started"; LOG(DEBUG) << "Previous block #1 is " << prev_blocks.at(0).to_str(); if (prev_blocks.size() > 1) { LOG(DEBUG) << "Previous block #2 is " << prev_blocks.at(1).to_str(); @@ -250,10 +277,21 @@ void Collator::start_up() { CHECK(pending); } +/** + * Raises an error when timeout is reached. + */ void Collator::alarm() { fatal_error(ErrorCode::timeout, "timeout"); } +/** + * Generates a string representation of a shard. + * + * @param workchain The workchain ID of the shard. + * @param shard The shard ID. + * + * @returns A string representation of the shard. + */ std::string show_shard(ton::WorkchainId workchain, ton::ShardId shard) { char tmp[128]; char* ptr = tmp + snprintf(tmp, 31, "%d:", workchain); @@ -268,14 +306,35 @@ std::string show_shard(ton::WorkchainId workchain, ton::ShardId shard) { return {tmp, ptr}; } +/** + * Returns a string representation of the shard of the given block. + * + * @param blk_id The BlockId object. + * + * @returns A string representation of the shard. + */ std::string show_shard(const ton::BlockId blk_id) { return show_shard(blk_id.workchain, blk_id.shard); } +/** + * Converts a `ShardIdFull` object to a string representation. + * + * @param blk_id The `ShardIdFull` object to convert. + * + * @returns The string representation of the `ShardIdFull` object. + */ std::string show_shard(const ton::ShardIdFull blk_id) { return show_shard(blk_id.workchain, blk_id.shard); } +/** + * Handles a fatal error encountered during block candidate generation. + * + * @param error The error encountered. + * + * @returns False to indicate that a fatal error occurred. + */ bool Collator::fatal_error(td::Status error) { error.ensure_error(); LOG(ERROR) << "cannot generate block candidate for " << show_shard(shard_) << " : " << error.to_string(); @@ -288,14 +347,39 @@ bool Collator::fatal_error(td::Status error) { return false; } +/** + * Handles a fatal error encountered during block candidate generation. + * + * @param err_code The error code. + * @param err_msg The error message. + * + * @returns False to indicate that a fatal error occurred. + */ bool Collator::fatal_error(int err_code, std::string err_msg) { return fatal_error(td::Status::Error(err_code, err_msg)); } +/** + * Handles a fatal error encountered during block candidate generation. + * + * @param err_msg The error message. + * @param err_code The error code. + * + * @returns False to indicate that a fatal error occurred. + */ bool Collator::fatal_error(std::string err_msg, int err_code) { return fatal_error(td::Status::Error(err_code, err_msg)); } +/** + * Checks if there are any pending tasks. + * + * If there are no pending tasks, it continues collation. + * If collation fails, it raises a fatal error. + * If an exception is caught during collation, it raises a fatal error with the corresponding error message. + * + * @returns None + */ void Collator::check_pending() { // LOG(DEBUG) << "pending = " << pending; if (!pending) { @@ -310,6 +394,13 @@ void Collator::check_pending() { } } +/** + * Registers a masterchain state. + * + * @param other_mc_state The masterchain state to register. + * + * @returns True if the registration is successful, false otherwise. 
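check_pending(), documented above, resumes collation once every outstanding query has answered and turns any exception thrown while collating into a fatal error instead of letting it escape the actor. A small self-contained sketch of that wrap-and-convert pattern (the toy Status type stands in for td::Status; the real code also handles TON's VM error types separately):

#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

struct Status {
  bool ok;
  std::string message;
  static Status OK() { return {true, {}}; }
  static Status Error(std::string msg) { return {false, std::move(msg)}; }
};

// Runs one collation step and converts a thrown exception into an error status,
// so the caller can report a single fatal error instead of crashing.
template <typename Step>
Status run_step(Step &&step) {
  try {
    step();
    return Status::OK();
  } catch (const std::exception &e) {
    return Status::Error(std::string("exception during collation: ") + e.what());
  }
}

int main() {
  Status good = run_step([] { /* a step that succeeds */ });
  Status bad = run_step([] { throw std::runtime_error("out of gas"); });
  std::cout << good.ok << " / " << bad.ok << " (" << bad.message << ")\n";
  return 0;
}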
+ */ bool Collator::register_mc_state(Ref other_mc_state) { if (other_mc_state.is_null() || mc_state_.is_null()) { return false; @@ -335,6 +426,14 @@ bool Collator::register_mc_state(Ref other_mc_state) { return true; } +/** + * Requests the auxiliary masterchain state. + * + * @param seqno The seqno of the block. + * @param state A reference to the auxiliary masterchain state. + * + * @returns True if the auxiliary masterchain state is successfully requested, false otherwise. + */ bool Collator::request_aux_mc_state(BlockSeqno seqno, Ref& state) { if (mc_state_.is_null()) { return fatal_error(PSTRING() << "cannot find masterchain block with seqno " << seqno @@ -367,6 +466,13 @@ bool Collator::request_aux_mc_state(BlockSeqno seqno, Ref& st return true; } +/** + * Retrieves the auxiliary masterchain state for a given block sequence number. + * + * @param seqno The sequence number of the block. + * + * @returns A reference to the auxiliary masterchain state if found, otherwise an empty reference. + */ Ref Collator::get_aux_mc_state(BlockSeqno seqno) const { auto it = aux_mc_states_.find(seqno); if (it != aux_mc_states_.end()) { @@ -376,6 +482,13 @@ Ref Collator::get_aux_mc_state(BlockSeqno seqno) const { } } +/** + * Callback function called after retrieving the auxiliary shard state. + * Handles the retrieved shard state and performs necessary checks and registrations. + * + * @param blkid The BlockIdExt of the shard state. + * @param res The result of retrieving the shard state. + */ void Collator::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result> res) { LOG(DEBUG) << "in Collator::after_get_aux_shard_state(" << blkid.to_str() << ")"; --pending; @@ -401,6 +514,14 @@ void Collator::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result, BlockIdExt>> res) { - LOG(DEBUG) << "in Collator::after_get_mc_state()"; + LOG(WARNING) << "in Collator::after_get_mc_state()"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -456,8 +582,14 @@ void Collator::after_get_mc_state(td::Result, Bl check_pending(); } +/** + * Callback function called after retrieving the shard state for a previous block. + * + * @param idx The index of the previous shard block (0 or 1). + * @param res The retrieved shard state. + */ void Collator::after_get_shard_state(int idx, td::Result> res) { - LOG(DEBUG) << "in Collator::after_get_shard_state(" << idx << ")"; + LOG(WARNING) << "in Collator::after_get_shard_state(" << idx << ")"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -483,6 +615,12 @@ void Collator::after_get_shard_state(int idx, td::Result> res) { check_pending(); } +/** + * Callback function called after retrieving block data for a previous block. + * + * @param idx The index of the previous block (0 or 1). + * @param res The retreived block data. + */ void Collator::after_get_block_data(int idx, td::Result> res) { LOG(DEBUG) << "in Collator::after_get_block_data(" << idx << ")"; --pending; @@ -522,6 +660,11 @@ void Collator::after_get_block_data(int idx, td::Result> res) { check_pending(); } +/** + * Callback function called after retrieving shard block descriptions for masterchain. + * + * @param res The retrieved shard block descriptions. 
+ */ void Collator::after_get_shard_blocks(td::Result>> res) { --pending; if (res.is_error()) { @@ -534,6 +677,11 @@ void Collator::after_get_shard_blocks(td::Resultget_neighbor_shard_hash_ids(shard_); @@ -648,6 +806,32 @@ bool Collator::request_neighbor_msg_queues() { return true; } +/** + * Requests the size of the outbound message queue from the previous state(s). + * +* @returns True if the request was successful, false otherwise. + */ +bool Collator::request_out_msg_queue_size() { + if (after_split_) { + // If block is after split, the size is calculated during split (see Collator::split_last_state) + return true; + } + for (size_t i = 0; i < prev_blocks.size(); ++i) { + ++pending; + send_closure_later(manager, &ValidatorManager::get_out_msg_queue_size, prev_blocks[i], + [self = get_self(), i](td::Result res) { + td::actor::send_closure(std::move(self), &Collator::got_out_queue_size, i, std::move(res)); + }); + } + return true; +} + +/** + * Handles the result of obtaining the outbound queue for a neighbor. + * + * @param i The index of the neighbor. + * @param res The obtained outbound queue. + */ void Collator::got_neighbor_msg_queues(td::Result>> R) { --pending; if (R.is_error()) { @@ -701,7 +885,7 @@ void Collator::got_neighbor_msg_queue(unsigned i, Ref res) { } Ref outq_descr = outq_descr_res.move_as_ok(); block::McShardDescr& descr = neighbors_.at(i); - LOG(DEBUG) << "obtained outbound queue for neighbor #" << i << "(" << descr.shard().to_str() << ")"; + LOG(WARNING) << "obtained outbound queue for neighbor #" << i << "(" << descr.shard().to_str() << ")"; if (outq_descr->get_block_id() != descr.blk_) { LOG(DEBUG) << "outq_descr->id = " << outq_descr->get_block_id().to_str() << " ; descr.id = " << descr.blk_.to_str(); fatal_error( @@ -752,6 +936,33 @@ void Collator::got_neighbor_msg_queue(unsigned i, Ref res) { } while (false); } +/** + * Handles the result of obtaining the size of the outbound message queue. + * + * If the block is after merge then the two sizes are added. + * + * @param i The index of the previous block (0 or 1). + * @param res The result object containing the size of the queue. + */ +void Collator::got_out_queue_size(size_t i, td::Result res) { + --pending; + if (res.is_error()) { + fatal_error( + res.move_as_error_prefix(PSTRING() << "failed to get message queue size from prev block #" << i << ": ")); + return; + } + td::uint32 size = res.move_as_ok(); + LOG(WARNING) << "got outbound queue size from prev block #" << i << ": " << size; + out_msg_queue_size_ += size; + check_pending(); +} + +/** + * Unpacks and merges the states of two previous blocks. + * Used if the block is after_merge. + * + * @returns True if the unpacking and merging was successful, false otherwise. + */ bool Collator::unpack_merge_last_state() { LOG(DEBUG) << "unpack/merge last states"; // 0. mechanically merge two ShardStateUnsplit into split_state constructor @@ -797,6 +1008,12 @@ bool Collator::unpack_merge_last_state() { return import_shard_state_data(ss0); } +/** + * Unpacks the state of the previous block. + * Used if the block is not after_merge. + * + * @returns True if the unpacking is successful, false otherwise. + */ bool Collator::unpack_last_state() { if (after_merge_) { if (!unpack_merge_last_state()) { @@ -823,6 +1040,15 @@ bool Collator::unpack_last_state() { import_shard_state_data(ss); } +/** + * Unpacks the state of a previous block and performs necessary checks. + * + * @param ss The ShardState object to unpack the state into. 
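request_out_msg_queue_size() and got_out_queue_size() above follow the same pattern as the other queries issued from start_up(): increment pending once per outstanding request, fold each reply into out_msg_queue_size_, and let check_pending() continue collation once everything has arrived. A simplified, synchronous stand-in for that flow is sketched below; the aggregator type is hypothetical and omits the td::actor plumbing.

#include <cstdint>
#include <functional>
#include <vector>

struct QueueSizeAggregator {
  int pending = 0;
  std::uint32_t out_msg_queue_size = 0;
  std::function<void()> on_all_replies;  // plays the role of check_pending()

  void request(const std::vector<std::uint32_t>& prev_block_queue_sizes) {
    for (std::uint32_t size : prev_block_queue_sizes) {
      ++pending;
      // The real code asks the ValidatorManager asynchronously; here the reply
      // arrives immediately.
      got_out_queue_size(size);
    }
  }
  void got_out_queue_size(std::uint32_t size) {
    out_msg_queue_size += size;  // after a merge, the two previous sizes add up
    if (--pending == 0 && on_all_replies) {
      on_all_replies();
    }
  }
};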
+ * @param blkid The BlockIdExt of the previous block. + * @param prev_state_root The root of the state. + * + * @returns True if the unpacking and checks are successful, false otherwise. + */ bool Collator::unpack_one_last_state(block::ShardState& ss, BlockIdExt blkid, Ref prev_state_root) { auto res = ss.unpack_state_ext(blkid, std::move(prev_state_root), global_id_, prev_mc_block_seqno, after_split_, after_split_ | after_merge_, [self = this](ton::BlockSeqno mc_seqno) { @@ -841,6 +1067,14 @@ bool Collator::unpack_one_last_state(block::ShardState& ss, BlockIdExt blkid, Re return true; } +/** + * Splits the state of previous block. + * Used if the block is after_split. + * + * @param ss The ShardState object representing the previous state. The result is stored here. + * + * @returns True if the split operation is successful, false otherwise. + */ bool Collator::split_last_state(block::ShardState& ss) { LOG(INFO) << "Splitting previous state " << ss.id_.to_str() << " to subshard " << shard_.to_str(); CHECK(after_split_); @@ -855,18 +1089,26 @@ bool Collator::split_last_state(block::ShardState& ss) { return fatal_error(res2.move_as_error()); } sibling_processed_upto_ = res2.move_as_ok(); - auto res3 = ss.split(shard_); + auto res3 = ss.split(shard_, &out_msg_queue_size_); if (res3.is_error()) { return fatal_error(std::move(res3)); } return true; } -// SETS: account_dict, shard_libraries_, mc_state_extra -// total_balance_ = old_total_balance_, total_validator_fees_ -// SETS: overload_history_, underload_history_ -// SETS: prev_state_utime_, prev_state_lt_, prev_vert_seqno_ -// SETS: out_msg_queue, processed_upto_, ihr_pending +/** + * Imports the shard state data into the Collator object. + * + * SETS: account_dict, shard_libraries_, mc_state_extra + * total_balance_ = old_total_balance_, total_validator_fees_ + * SETS: overload_history_, underload_history_ + * SETS: prev_state_utime_, prev_state_lt_, prev_vert_seqno_ + * SETS: out_msg_queue, processed_upto_, ihr_pending + * + * @param ss The ShardState object containing the shard state data. + * + * @returns True if the import was successful, False otherwise. + */ bool Collator::import_shard_state_data(block::ShardState& ss) { account_dict = std::move(ss.account_dict_); shard_libraries_ = std::move(ss.shard_libraries_); @@ -888,6 +1130,12 @@ bool Collator::import_shard_state_data(block::ShardState& ss) { return true; } +/** + * Adds trivials neighbor after merging two shards. + * Trivial neighbors are the two previous blocks. + * + * @returns True if the operation is successful, false otherwise. + */ bool Collator::add_trivial_neighbor_after_merge() { LOG(DEBUG) << "in add_trivial_neighbor_after_merge()"; CHECK(prev_blocks.size() == 2); @@ -922,6 +1170,12 @@ bool Collator::add_trivial_neighbor_after_merge() { return true; } +/** + * Adds a trivial neighbor. + * A trivial neighbor is the previous block. + * + * @returns True if the operation is successful, false otherwise. + */ bool Collator::add_trivial_neighbor() { LOG(DEBUG) << "in add_trivial_neighbor()"; if (after_merge_) { @@ -1057,6 +1311,15 @@ bool Collator::add_trivial_neighbor() { return true; } +/** + * Checks the previous block against the block registered in the masterchain. + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * @param chk_chain_len Flag indicating whether to check the chain length. + * + * @returns True if the previous block is valid, false otherwise. 
+ */ bool Collator::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev, bool chk_chain_len) { if (listed.seqno() > prev.seqno()) { return fatal_error(PSTRING() << "cannot generate a shardchain block after previous block " << prev.to_str() @@ -1076,6 +1339,14 @@ bool Collator::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev return true; } +/** + * Checks the previous block against the block registered in the masterchain. + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * + * @returns True if the previous block is equal to the one registered in the masterchain, false otherwise. + */ bool Collator::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev) { if (listed != prev) { return fatal_error(PSTRING() << "cannot generate shardchain block for shard " << shard_.to_str() @@ -1086,6 +1357,11 @@ bool Collator::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt return true; } +/** + * Checks the validity of the shard configuration of the current shard. + * + * @returns True if the shard configuration is valid, false otherwise. + */ bool Collator::check_this_shard_mc_info() { wc_info_ = config_->get_workchain_info(workchain()); if (wc_info_.is_null()) { @@ -1220,14 +1496,27 @@ bool Collator::check_this_shard_mc_info() { return true; } +/** + * Initializes the block limits for the collator. + * + * @returns True if the block limits were successfully initialized, false otherwise. + */ bool Collator::init_block_limits() { CHECK(block_limits_); CHECK(state_usage_tree_); + if (now_ > prev_now_ + 15 && block_limits_->lt_delta.hard() > 200) { + block_limits_->lt_delta = {20, 180, 200}; + } block_limits_->usage_tree = state_usage_tree_.get(); block_limit_status_ = std::make_unique(*block_limits_); return true; } +/** + * Performs pre-initialization steps for the Collator. + * + * @returns True if pre-initialization is successful, false otherwise. + */ bool Collator::do_preinit() { CHECK(prev_blocks.size() == 1U + after_merge_); last_block_seqno = prev_blocks[0].seqno(); @@ -1281,9 +1570,18 @@ bool Collator::do_preinit() { if (!request_neighbor_msg_queues()) { return false; } + if (!request_out_msg_queue_size()) { + return false; + } return true; } +/** + * Adjusts the shard configuration by adding new workchains to the shard configuration in the masterchain state. + * Used in masterchain collator. + * + * @returns True if the shard configuration was successfully adjusted, false otherwise. + */ bool Collator::adjust_shard_config() { CHECK(is_masterchain() && config_ && shard_conf_); const block::WorkchainSet& wset = config_->get_workchain_list(); @@ -1312,12 +1610,30 @@ bool Collator::adjust_shard_config() { return true; } +/** + * Compares two ShardTopBlockDescription references based on their block IDs. + * + * @param a The first ShardTopBlockDescription reference. + * @param b The second ShardTopBlockDescription reference. + * + * @returns True if a is considered less than b, false otherwise. + */ static bool cmp_shard_block_descr_ref(const Ref& a, const Ref& b) { BlockId x = a->block_id().id, y = b->block_id().id; return x.workchain < y.workchain || (x.workchain == y.workchain && (x.shard < y.shard || (x.shard == y.shard && x.seqno > y.seqno))); } +/** + * Stores the fees imported from a shard blocks to `fees_import_dict_`. + * Used in masterchain collator. + * + * @param shard The shard identifier. 
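cmp_shard_block_descr_ref() above orders shard top block descriptions by workchain and shard ascending but by seqno descending, so the newest description of each shard sorts first. The same ordering on a simplified BlockId, a stand-in for the real Ref of ShardTopBlockDescription:

#include <algorithm>
#include <cstdint>
#include <vector>

struct BlockId {
  std::int32_t workchain;
  std::uint64_t shard;
  std::uint32_t seqno;
};

// Ascending by workchain and shard, descending by seqno.
static bool cmp_block_id(const BlockId& x, const BlockId& y) {
  return x.workchain < y.workchain ||
         (x.workchain == y.workchain &&
          (x.shard < y.shard || (x.shard == y.shard && x.seqno > y.seqno)));
}

// Usage: std::sort(descrs.begin(), descrs.end(), cmp_block_id);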
+ * @param fees The fees imported from the block. + * @param created The fee for creating shard blocks. + * + * @returns True if the fees were successfully stored, false otherwise. + */ bool Collator::store_shard_fees(ShardIdFull shard, const block::CurrencyCollection& fees, const block::CurrencyCollection& created) { if (shard.is_valid() && fees.is_valid()) { @@ -1333,6 +1649,14 @@ bool Collator::store_shard_fees(ShardIdFull shard, const block::CurrencyCollecti } } +/** + * Stores the fees imported from a shard blocks to `fees_import_dict_`. + * Used in masterchain collator. + * + * @param descr A reference to the McShardHash object containing the shard information. + * + * @returns True if the shard fees and funds created were successfully stored, false otherwise. + */ bool Collator::store_shard_fees(Ref descr) { CHECK(descr.not_null()); CHECK(descr->fees_collected_.is_valid()); @@ -1341,6 +1665,11 @@ bool Collator::store_shard_fees(Ref descr) { return true; } +/** + * Imports new top shard blocks and updates the shard configuration. + * + * @returns True if the import was successful, false otherwise. + */ bool Collator::import_new_shard_top_blocks() { if (shard_block_descr_.empty()) { return true; @@ -1482,6 +1811,13 @@ bool Collator::import_new_shard_top_blocks() { return true; } +/** + * Registers the shard block creators to block_create_count_ + * + * @param creator_list A vector of Bits256 representing the shard block creators. + * + * @returns True if the registration was successful, False otherwise. + */ bool Collator::register_shard_block_creators(std::vector creator_list) { for (const auto& x : creator_list) { LOG(DEBUG) << "registering block creator " << x.to_hex(); @@ -1496,9 +1832,14 @@ bool Collator::register_shard_block_creators(std::vector creator_li return true; } +/** + * Performs pre-initialization and collates the new block. + * + * @returns True if collation is successful, false otherwise. + */ bool Collator::try_collate() { if (!preinit_complete) { - LOG(DEBUG) << "running do_preinit()"; + LOG(WARNING) << "running do_preinit()"; if (!do_preinit()) { return fatal_error(-667, "error preinitializing data required by collator"); } @@ -1555,6 +1896,14 @@ bool Collator::try_collate() { return do_collate(); } +/** + * Adjusts one entry from the processed up to information using the masterchain state that is referenced in the entry. + * + * @param proc The MsgProcessedUpto object. + * @param owner The shard that the MsgProcessesUpto information is taken from. + * + * @returns True if the processed up to information was successfully adjusted, false otherwise. + */ bool Collator::fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton::ShardIdFull& owner) { if (proc.compute_shard_end_lt) { return true; @@ -1571,6 +1920,13 @@ bool Collator::fix_one_processed_upto(block::MsgProcessedUpto& proc, const ton:: return (bool)proc.compute_shard_end_lt; } +/** + * Adjusts the processed up to collection using the using the auxilliary masterchain states. + * + * @param upto The MsgProcessedUptoCollection to be adjusted. + * + * @returns True if all entries were successfully adjusted, False otherwise. + */ bool Collator::fix_processed_upto(block::MsgProcessedUptoCollection& upto) { for (auto& entry : upto.list) { if (!fix_one_processed_upto(entry, upto.owner)) { @@ -1580,10 +1936,25 @@ bool Collator::fix_processed_upto(block::MsgProcessedUptoCollection& upto) { return true; } +/** + * Initializes the unix time for the new block. 
+ * + * Unix time is set based on the current time, and the timestamps of the previous blocks. + * If the previous block has a timestamp too far in the past then skipping importing external messages and new shard blocks is allowed. + * + * @returns True if the initialization is successful, false otherwise. + */ bool Collator::init_utime() { CHECK(config_); // consider unixtime and lt from previous block(s) of the same shardchain prev_now_ = prev_state_utime_; + // Extend collator timeout if previous block is too old + td::Timestamp new_timeout = td::Timestamp::in(std::min(30.0, (td::Clocks::system() - (double)prev_now_) / 2)); + if (timeout < new_timeout) { + timeout = new_timeout; + alarm_timestamp() = timeout; + } + auto prev = std::max(config_->utime, prev_now_); now_ = std::max(prev + 1, (unsigned)std::time(nullptr)); if (now_ > now_upper_limit_) { @@ -1625,6 +1996,9 @@ bool Collator::init_utime() { return true; } +/** + * Initializes the logical time of the new block. + */ bool Collator::init_lt() { CHECK(config_); start_lt = config_->lt; @@ -1647,6 +2021,11 @@ bool Collator::init_lt() { return true; } +/** + * Fetches and initializes the configuration parameters using the masterchain configuration. + * + * @returns True if the configuration parameters were successfully fetched and initialized, false otherwise. + */ bool Collator::fetch_config_params() { auto res = block::FetchConfigParams::fetch_config_params(*config_, &old_mparams_, &storage_prices_, &storage_phase_cfg_, @@ -1661,6 +2040,13 @@ bool Collator::fetch_config_params() { return true; } +/** + * Computes the amount of extra currencies to be minted. + * + * @param to_mint A reference to the CurrencyCollection object to store the minted amount. + * + * @returns True if the computation is successful, false otherwise. + */ bool Collator::compute_minted_amount(block::CurrencyCollection& to_mint) { if (!is_masterchain()) { return to_mint.set_zero(); @@ -1722,6 +2108,11 @@ bool Collator::create_output_queue_merger() { return true; } +/** + * Initializes value_flow_ and computes fees for creating the new block. + * + * @returns True if the initialization is successful, false otherwise. + */ bool Collator::init_value_create() { value_flow_.created.set_zero(); value_flow_.minted.set_zero(); @@ -1752,11 +2143,14 @@ bool Collator::init_value_create() { return true; } +/** + * Performs the collation of the new block. + */ bool Collator::do_collate() { // After do_collate started it will not be interrupted by timeout alarm_timestamp() = td::Timestamp::never(); - LOG(DEBUG) << "do_collate() : start"; + LOG(WARNING) << "do_collate() : start"; if (!fetch_config_params()) { return fatal_error("cannot fetch required configuration parameters from masterchain state"); } @@ -1877,6 +2271,14 @@ bool Collator::do_collate() { return true; } +/** + * Dequeues an outbound message from the message queue of this shard. + * + * @param msg_envelope The message envelope to dequeue. + * @param delivered_lt The logical time at which the message was delivered. + * + * @returns True if the message was successfully dequeued, false otherwise. + */ bool Collator::dequeue_message(Ref msg_envelope, ton::LogicalTime delivered_lt) { LOG(DEBUG) << "dequeueing outbound message"; vm::CellBuilder cb; @@ -1896,6 +2298,13 @@ bool Collator::dequeue_message(Ref msg_envelope, ton::LogicalTime deli } } +/** + * Cleans up the outbound message queue by removing messages that have already been imported by neighbors. + * + * Cleanup may be interrupted early if it takes too long. 
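The unixtime and timeout rules added in init_utime() above condense to two small formulas, restated here as stand-alone functions with the values taken from the diff; the function names are illustrative only.

#include <algorithm>
#include <ctime>

// New block's unixtime: strictly greater than both the reference masterchain
// block's utime and the previous shard block's utime, otherwise the wall clock.
static unsigned pick_block_utime(unsigned mc_config_utime, unsigned prev_block_utime) {
  unsigned prev = std::max(mc_config_utime, prev_block_utime);
  return std::max(prev + 1, static_cast<unsigned>(std::time(nullptr)));
}

// Collator timeout extension when the previous block is old: half the gap
// between the wall clock and the previous block's utime, capped at 30 seconds.
static double extra_collation_seconds(double wall_clock, double prev_block_utime) {
  return std::min(30.0, (wall_clock - prev_block_utime) / 2);
}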
+ * + * @returns True if the cleanup operation was successful, false otherwise. + */ bool Collator::out_msg_queue_cleanup() { LOG(INFO) << "cleaning outbound queue from messages already imported by neighbors"; if (verbosity >= 2) { @@ -1904,106 +2313,158 @@ bool Collator::out_msg_queue_cleanup() { block::gen::t_OutMsgQueue.print(std::cerr, *rt); rt->print_rec(std::cerr); } - for (const auto& nb : neighbors_) { - if (!nb.is_disabled() && (!nb.processed_upto || !nb.processed_upto->can_check_processed())) { - return fatal_error(-667, PSTRING() << "internal error: no info for checking processed messages from neighbor " - << nb.blk_.to_str()); - } - } - auto queue_root = out_msg_queue_->get_root_cell(); - if (queue_root.is_null()) { - LOG(DEBUG) << "out_msg_queue is empty"; - return true; - } - // Unwrap UsageCell: don't build proof for visiting output queue (unless something is deleted) - auto r_cell = queue_root->load_cell(); - if (r_cell.is_error()) { - return fatal_error(r_cell.move_as_error()); - } - auto pure_out_msg_queue = - std::make_unique(r_cell.move_as_ok().data_cell, 352, block::tlb::aug_OutMsgQueue); + if (after_merge_) { + // We need to clean the whole queue after merge + // Queue is not too big, see const MERGE_MAX_QUEUE_SIZE + for (const auto& nb : neighbors_) { + if (!nb.is_disabled() && (!nb.processed_upto || !nb.processed_upto->can_check_processed())) { + return fatal_error(-667, PSTRING() << "internal error: no info for checking processed messages from neighbor " + << nb.blk_.to_str()); + } + } + td::uint32 deleted = 0; + auto res = out_msg_queue_->filter([&](vm::CellSlice& cs, td::ConstBitPtr key, int n) -> int { + assert(n == 352); + block::EnqueuedMsgDescr enq_msg_descr; + unsigned long long created_lt; + if (!(cs.fetch_ulong_bool(64, created_lt) // augmentation + && enq_msg_descr.unpack(cs) // unpack EnqueuedMsg + && enq_msg_descr.check_key(key) // check key + && enq_msg_descr.lt_ == created_lt)) { + LOG(ERROR) << "cannot unpack EnqueuedMsg with key " << key.to_hex(n); + return -1; + } + LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_; + bool delivered = false; + ton::LogicalTime deliver_lt = 0; + for (const auto& neighbor : neighbors_) { + // could look up neighbor with shard containing enq_msg_descr.next_prefix more efficiently + // (instead of checking all neighbors) + if (!neighbor.is_disabled() && neighbor.processed_upto->already_processed(enq_msg_descr)) { + delivered = true; + deliver_lt = neighbor.end_lt(); + break; + } + } + if (delivered) { + ++deleted; + CHECK(out_msg_queue_size_ > 0); + --out_msg_queue_size_; + LOG(DEBUG) << "outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex() + << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ << " has been already delivered, dequeueing"; + if (!dequeue_message(std::move(enq_msg_descr.msg_env_), deliver_lt)) { + fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") by inserting a msg_export_deq record"); + return -1; + } + register_out_msg_queue_op(); + if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { + block_full_ = true; + } + } + return !delivered; + }); + LOG(WARNING) << "deleted " << deleted << " messages from out_msg_queue after merge, remaining queue size is " + << out_msg_queue_size_; + if (res < 0) { + return fatal_error("error scanning/updating 
OutMsgQueue"); + } + } else { + std::vector> queue_parts; - int deleted = 0; - bool fail = false; - pure_out_msg_queue->check_for_each([&](Ref value, td::ConstBitPtr key, int n) -> bool { - assert(n == 352); - vm::CellSlice& cs = value.write(); - // LOG(DEBUG) << "key is " << key.to_hex(n); - if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) { - LOG(WARNING) << "cleaning up outbound queue takes too long, ending"; - outq_cleanup_partial_ = true; - return false; // retain all remaining outbound queue entries including this one without processing + block::OutputQueueMerger::Neighbor this_queue{BlockIdExt{new_id} /* block id is only used for logs */, + out_msg_queue_->get_root_cell()}; + for (const auto& nb : neighbors_) { + if (nb.is_disabled()) { + continue; + } + if (!nb.processed_upto || !nb.processed_upto->can_check_processed()) { + return fatal_error(-667, PSTRING() << "internal error: no info for checking processed messages from neighbor " + << nb.blk_.to_str()); + } + queue_parts.emplace_back(block::OutputQueueMerger{nb.shard(), {this_queue}}, &nb); } - if (block_full_) { - LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially"; - outq_cleanup_partial_ = true; - return false; // retain all remaining outbound queue entries including this one without processing - } - block::EnqueuedMsgDescr enq_msg_descr; - unsigned long long created_lt; - if (!(cs.fetch_ulong_bool(64, created_lt) // augmentation - && enq_msg_descr.unpack(cs) // unpack EnqueuedMsg - && enq_msg_descr.check_key(key) // check key - && enq_msg_descr.lt_ == created_lt)) { - LOG(ERROR) << "cannot unpack EnqueuedMsg with key " << key.to_hex(n); - fail = true; - return false; - } - LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," - << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_; - bool delivered = false; - ton::LogicalTime deliver_lt = 0; - for (const auto& neighbor : neighbors_) { - // could look up neighbor with shard containing enq_msg_descr.next_prefix more efficiently - // (instead of checking all neighbors) - if (!neighbor.is_disabled() && neighbor.processed_upto->already_processed(enq_msg_descr)) { - delivered = true; - deliver_lt = neighbor.end_lt(); + + size_t i = 0; + td::uint32 deleted = 0; + while (!queue_parts.empty()) { + if (block_full_) { + LOG(WARNING) << "BLOCK FULL while cleaning up outbound queue, cleanup completed only partially"; break; } - } - if (delivered) { - LOG(DEBUG) << "outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," << enq_msg_descr.hash_.to_hex() - << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ << " has been already delivered, dequeueing"; - // Get value from out_msg_queue_ instead of pure_out_msg_queue (for proof) - auto value2 = out_msg_queue_->lookup_delete_with_extra(key, n); - CHECK(value2.not_null()); - vm::CellSlice& cs2 = value2.write(); - CHECK(cs2.fetch_ulong_bool(64, created_lt) // augmentation - && enq_msg_descr.unpack(cs2) // unpack EnqueuedMsg - && enq_msg_descr.check_key(key) // check key - && enq_msg_descr.lt_ == created_lt); - - if (!dequeue_message(std::move(enq_msg_descr.msg_env_), deliver_lt)) { - fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," - << enq_msg_descr.hash_.to_hex() << ") by inserting a msg_export_deq record"); - fail = true; - return false; + if (queue_cleanup_timeout_.is_in_past(td::Timestamp::now())) { + LOG(WARNING) << "cleaning up outbound queue takes too long, 
ending"; + break; } - register_out_msg_queue_op(); - if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { - block_full_ = true; + if (i == queue_parts.size()) { + i = 0; } + auto& queue = queue_parts.at(i).first; + auto nb = queue_parts.at(i).second; + auto kv = queue.extract_cur(); + if (kv) { + block::EnqueuedMsgDescr enq_msg_descr; + if (!(enq_msg_descr.unpack(kv->msg.write()) // unpack EnqueuedMsg + && enq_msg_descr.check_key(kv->key.cbits()) // check key + )) { + return fatal_error(PSTRING() << "error scanning/updating OutMsgQueue: cannot unpack EnqueuedMsg with key " + << kv->key.to_hex()); + } + if (nb->processed_upto->already_processed(enq_msg_descr)) { + LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ + << ": message has been already delivered, dequeueing"; + ++deleted; + CHECK(out_msg_queue_size_ > 0); + --out_msg_queue_size_; + out_msg_queue_->lookup_delete_with_extra(kv->key.cbits(), kv->key_len); + if (!dequeue_message(std::move(enq_msg_descr.msg_env_), nb->end_lt())) { + return fatal_error(PSTRING() << "cannot dequeue outbound message with (lt,hash)=(" << enq_msg_descr.lt_ + << "," << enq_msg_descr.hash_.to_hex() + << ") by inserting a msg_export_deq record"); + } + register_out_msg_queue_op(); + if (!block_limit_status_->fits(block::ParamLimits::cl_normal)) { + block_full_ = true; + } + queue.next(); + ++i; + continue; + } else { + LOG(DEBUG) << "scanning outbound message with (lt,hash)=(" << enq_msg_descr.lt_ << "," + << enq_msg_descr.hash_.to_hex() << ") enqueued_lt=" << enq_msg_descr.enqueued_lt_ + << ": message has not been delivered"; + } + } + LOG(DEBUG) << "no more unprocessed messages to shard " << nb->shard().to_str(); + std::swap(queue_parts[i], queue_parts.back()); + queue_parts.pop_back(); } - return true; - }); - LOG(DEBUG) << "deleted " << deleted << " messages from out_msg_queue"; - if (fail) { - return fatal_error("error scanning/updating OutMsgQueue"); + LOG(WARNING) << "deleted " << deleted << " messages from out_msg_queue, remaining queue size is " + << out_msg_queue_size_; } - auto rt = out_msg_queue_->get_root(); if (verbosity >= 2) { + auto rt = out_msg_queue_->get_root(); std::cerr << "new out_msg_queue is "; block::gen::t_OutMsgQueue.print(std::cerr, *rt); rt->print_rec(std::cerr); } - // CHECK(block::gen::t_OutMsgQueue.validate_upto(100000, *rt)); // DEBUG, comment later if SLOW return register_out_msg_queue_op(true); } +/** + * Creates a new Account object from the given address and serialized account data. + * + * @param addr A pointer to the 256-bit address of the account. + * @param account A cell slice with an account serialized using ShardAccount TLB-scheme. + * @param force_create A flag indicating whether to force the creation of a new account if `account` is null. + * + * @returns A unique pointer to the created Account object, or nullptr if the creation failed. 
+ */ std::unique_ptr Collator::make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra, bool force_create) { + bool force_create) { if (account.is_null() && !force_create) { return nullptr; } @@ -2012,19 +2473,35 @@ std::unique_ptr Collator::make_account_from(td::ConstBitPtr addr if (!ptr->init_new(now_)) { return nullptr; } - } else if (!ptr->unpack(std::move(account), std::move(extra), now_, - is_masterchain() && config_->is_special_smartcontract(addr))) { + } else if (!ptr->unpack(std::move(account), now_, is_masterchain() && config_->is_special_smartcontract(addr))) { return nullptr; } ptr->block_lt = start_lt; return ptr; } +/** + * Looks up an account in the Collator's account map. + * + * @param addr A pointer to the 256-bit address of the account to be looked up. + * + * @returns A pointer to the Account object if found, otherwise returns nullptr. + */ block::Account* Collator::lookup_account(td::ConstBitPtr addr) const { auto found = accounts.find(addr); return found != accounts.end() ? found->second.get() : nullptr; } +/** + * Retreives an Account object from the data in the shard state. + * Accounts are cached in the Collator's map. + * + * @param addr The 256-bit address of the account. + * @param force_create Flag indicating whether to create a new account if it does not exist. + * + * @returns A Result object containing a pointer to the account if found or created successfully, or an error status. + * Returns nullptr if account does not exist and not force_create. + */ td::Result Collator::make_account(td::ConstBitPtr addr, bool force_create) { auto found = lookup_account(addr); if (found) { @@ -2036,7 +2513,7 @@ td::Result Collator::make_account(td::ConstBitPtr addr, bool fo return nullptr; } } - auto new_acc = make_account_from(addr, std::move(dict_entry.first), std::move(dict_entry.second), force_create); + auto new_acc = make_account_from(addr, std::move(dict_entry.first), force_create); if (!new_acc) { return td::Status::Error(PSTRING() << "cannot load account " << addr.to_hex(256) << " from previous state"); } @@ -2052,6 +2529,11 @@ td::Result Collator::make_account(td::ConstBitPtr addr, bool fo return ins.first->second.get(); } +/** + * Combines account transactions and updates the ShardAccountBlocks and ShardAccounts. + * + * @returns True if the operation is successful, false otherwise. + */ bool Collator::combine_account_transactions() { vm::AugmentedDictionary dict{256, block::tlb::aug_ShardAccountBlocks}; for (auto& z : accounts) { @@ -2164,6 +2646,15 @@ bool Collator::combine_account_transactions() { return true; } +/** + * Creates a special transaction to recover a specified amount of currency to a destination address. + * + * @param amount The amount of currency to recover. + * @param dest_addr_cell The cell containing the destination address. + * @param in_msg The reference to the input message. + * + * @returns True if the special transaction was created successfully, false otherwise. + */ bool Collator::create_special_transaction(block::CurrencyCollection amount, Ref dest_addr_cell, Ref& in_msg) { if (amount.is_zero()) { @@ -2204,12 +2695,27 @@ bool Collator::create_special_transaction(block::CurrencyCollection amount, Ref< return true; } +/** + * Creates special transactions for retreiving fees and minted currencies. + * Used in masterchain collator. 
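make_account() above is a cache-or-create lookup: check the in-memory accounts map first, otherwise load the account from the shard state dictionary (or initialize an empty one when force_create is set) and remember it. A reduced version of that pattern, with std::string standing in for the 256-bit address and a stub loader:

#include <map>
#include <memory>
#include <string>

struct Account {
  std::string addr;
};

class AccountCache {
 public:
  // Returns nullptr only when the account does not exist and force_create is false.
  Account* make_account(const std::string& addr, bool force_create) {
    auto found = cache_.find(addr);
    if (found != cache_.end()) {
      return found->second.get();
    }
    std::unique_ptr<Account> acc = load_from_state(addr);  // stub for the dictionary lookup
    if (!acc) {
      if (!force_create) {
        return nullptr;
      }
      acc = std::make_unique<Account>(Account{addr});
    }
    auto ins = cache_.emplace(addr, std::move(acc));
    return ins.first->second.get();
  }

 private:
  std::unique_ptr<Account> load_from_state(const std::string&) {
    return nullptr;  // pretend the account is absent from the previous state
  }
  std::map<std::string, std::unique_ptr<Account>> cache_;
};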
+ * + * @returns True if both special transactions were + */ bool Collator::create_special_transactions() { CHECK(is_masterchain()); return create_special_transaction(value_flow_.recovered, config_->get_config_param(3, 1), recover_create_msg_) && create_special_transaction(value_flow_.minted, config_->get_config_param(2, 0), mint_msg_); } +/** + * Creates a tick-tock transaction for a given smart contract. + * + * @param smc_addr The address of the smart contract. + * @param req_start_lt The requested start logical time for the transaction. + * @param mask The value indicating wheter the thansaction is tick (mask == 2) or tock (mask == 1). + * + * @returns True if the transaction was created successfully, false otherwise. + */ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, ton::LogicalTime req_start_lt, int mask) { auto acc_res = make_account(smc_addr.cbits(), false); @@ -2251,7 +2757,7 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t return fatal_error(td::Status::Error( -666, std::string{"cannot serialize new transaction for smart contract "} + smc_addr.to_hex())); } - if (!trans->update_limits(*block_limit_status_)) { + if (!trans->update_limits(*block_limit_status_, /* with_gas = */ false)) { return fatal_error(-666, "cannot update block limit status to include the new transaction"); } if (trans->commit(*acc).is_null()) { @@ -2263,7 +2769,15 @@ bool Collator::create_ticktock_transaction(const ton::StdSmcAddress& smc_addr, t return true; } -Ref Collator::create_ordinary_transaction(Ref msg_root) { +/** + * Creates an ordinary transaction using a given message. + * + * @param msg_root The root of the message to be processed serialized using Message TLB-scheme. + * @param is_special_tx True if creating a special transaction (mint/recover), false otherwise. + * + * @returns The root of the serialized transaction, or an empty reference if the transaction creation fails. + */ +Ref Collator::create_ordinary_transaction(Ref msg_root, bool is_special_tx) { ton::StdSmcAddress addr; auto cs = vm::load_cell_slice(msg_root); bool external; @@ -2321,7 +2835,8 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { } std::unique_ptr trans = res.move_as_ok(); - if (!trans->update_limits(*block_limit_status_)) { + if (!trans->update_limits(*block_limit_status_, + /* with_gas = */ !(is_special_tx && compute_phase_cfg_.special_gas_full))) { fatal_error("cannot update block limit status to include the new transaction"); return {}; } @@ -2337,8 +2852,23 @@ Ref Collator::create_ordinary_transaction(Ref msg_root) { return trans_root; } -// If td::status::error_code == 669 - Fatal Error block can not be produced -// if td::status::error_code == 701 - Transaction can not be included into block, but it's ok (external or too early internal) +/** + * Creates an ordinary transaction using given parameters. + * + * @param msg_root The root of the message to be processed serialized using Message TLB-scheme. + * @param acc The account for which the transaction is being created. + * @param utime The Unix time of the transaction. + * @param lt The minimal logical time of the transaction. + * @param storage_phase_cfg The configuration for the storage phase of the transaction. + * @param compute_phase_cfg The configuration for the compute phase of the transaction. + * @param action_phase_cfg The configuration for the action phase of the transaction. + * @param external Flag indicating if the message is external. 
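The documentation above fixes two error codes for impl_create_ordinary_transaction(): 669 means the block itself cannot be produced, while 701 means only this particular message cannot be included yet. A caller can branch on them as sketched here; a plain struct replaces td::Status for the example.

struct Status {
  int code = 0;  // 0 = success
  bool is_ok() const { return code == 0; }
};

// Returns true when collation may continue (transaction created or message
// safely skipped), false when block generation must be aborted.
static bool handle_transaction_result(const Status& result, bool& skip_message) {
  skip_message = false;
  if (result.is_ok()) {
    return true;
  }
  if (result.code == 701) {
    skip_message = true;  // external or too-early internal message: leave it out of this block
    return true;
  }
  return false;  // 669 or any other code: fatal, the block cannot be produced
}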
+ * @param after_lt The logical time after which the transaction should occur. Used only for external messages. + * + * @returns A Result object containing the created transaction. + * Returns error_code == 669 if the error is fatal and the block can not be produced. + * Returns error_code == 701 if the transaction can not be included into block, but it's ok (external or too early internal). + */ td::Result> Collator::impl_create_ordinary_transaction(Ref msg_root, block::Account* acc, UnixTime utime, LogicalTime lt, @@ -2419,6 +2949,11 @@ td::Result> Collator::impl_crea return std::move(trans); } +/** + * Updates the maximum logical time if the given logical time is greater than the current maximum logical time. + * + * @param lt The logical time to be compared. + */ void Collator::update_max_lt(ton::LogicalTime lt) { CHECK(lt >= start_lt); if (lt > max_lt) { @@ -2426,6 +2961,13 @@ void Collator::update_max_lt(ton::LogicalTime lt) { } } +/** + * Updates information on the last processed internal message with a new logical time and hash. + * + * @param new_lt_hash The new logical time and hash pair. + * + * @returns True if the last processed internal message was successfully updated, false otherwise. + */ bool Collator::update_last_proc_int_msg(const std::pair& new_lt_hash) { if (last_proc_int_msg_ < new_lt_hash) { last_proc_int_msg_ = new_lt_hash; @@ -2440,6 +2982,14 @@ bool Collator::update_last_proc_int_msg(const std::pair addr_ref) const { return is_our_address(block::tlb::t_MsgAddressInt.get_prefix(std::move(addr_ref))); } +/** + * Checks if the given account ID prefix belongs to the current shard. + * + * @param addr_pfx The account ID prefix to check. + * + * @returns True if the account ID prefix belongs to the current shard, False otherwise. + */ bool Collator::is_our_address(ton::AccountIdPrefixFull addr_pfx) const { return ton::shard_contains(shard_, addr_pfx); } +/** + * Checks if the given address belongs to the current shard. + * + * @param addr The address to check. + * + * @returns True if the address belongs to the current shard, False otherwise. + */ bool Collator::is_our_address(const ton::StdSmcAddress& addr) const { return ton::shard_contains(get_shard(), addr); } -// 1 = processed, 0 = enqueued, 3 = processed, all future messages must be enqueued +/** + * Processes a message generated in this block. + * + * @param msg The new message to be processed. + * @param enqueue_only Flag indicating whether the message should only be enqueued. + * @param is_special New message if creating a special transaction, nullptr otherwise. + * + * @returns Returns: + * 0 - message was enqueued. + * 1 - message was processed. + * 3 - message was processed, all future messages must be enqueued. + * -1 - error occured. + */ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, Ref* is_special) { Ref src, dest; bool enqueue, external; @@ -2526,7 +3109,7 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R return -1; } // 1. create a Transaction processing this Message - auto trans_root = create_ordinary_transaction(msg.msg); + auto trans_root = create_ordinary_transaction(msg.msg, is_special != nullptr); if (trans_root.is_null()) { fatal_error("cannot create transaction for re-processing output message"); return -1; @@ -2578,11 +3161,22 @@ int Collator::process_one_new_message(block::NewOutMsg msg, bool enqueue_only, R return 1; } -// very similar to enqueue_message(), but for transit messages +/** + * Enqueues a transit message. 
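update_last_proc_int_msg() above relies on the lexicographic ordering of the (logical time, hash) pair: internal messages must be processed in strictly increasing (lt, hash) order, and std::pair's operator< provides exactly that comparison. A sketch with a string standing in for the 256-bit hash:

#include <cstdint>
#include <string>
#include <utility>

using LtHash = std::pair<std::uint64_t, std::string>;  // (logical time, message hash)

// Accepts the message only if it is strictly newer than the last processed one.
static bool update_last_proc_int_msg(LtHash& last, const LtHash& fresh) {
  if (last < fresh) {
    last = fresh;
    return true;
  }
  return false;  // an out-of-order internal message would make the block invalid
}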
+ * Very similar to enqueue_message(), but for transit messages. + * + * @param msg The message to be enqueued. + * @param old_msg_env The previous message envelope. + * @param prev_prefix The account ID prefix for this shard. + * @param cur_prefix The account ID prefix for the next hop. + * @param dest_prefix The prefix of the destination account ID. + * @param fwd_fee_remaining The remaining forward fee. + * + * @returns True if the transit message is successfully enqueued, false otherwise. + */ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_env, ton::AccountIdPrefixFull prev_prefix, ton::AccountIdPrefixFull cur_prefix, - ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining, - ton::LogicalTime enqueued_lt) { + ton::AccountIdPrefixFull dest_prefix, td::RefInt256 fwd_fee_remaining) { LOG(DEBUG) << "enqueueing transit message " << msg->get_hash().bits().to_hex(256); bool requeue = is_our_address(prev_prefix); // 1. perform hypercube routing @@ -2643,6 +3237,7 @@ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_ try { LOG(DEBUG) << "inserting into outbound queue message with (lt,key)=(" << start_lt << "," << key.to_hex() << ")"; ok = out_msg_queue_->set_builder(key.bits(), 352, cb, vm::Dictionary::SetMode::Add); + ++out_msg_queue_size_; } catch (vm::VmError) { ok = false; } @@ -2653,11 +3248,20 @@ bool Collator::enqueue_transit_message(Ref msg, Ref old_msg_ return register_out_msg_queue_op(); } +/** + * Deletes a message from the outbound message queue. + * + * @param key The key of the message to be deleted. + * + * @returns True if the message was successfully deleted, false otherwise. + */ bool Collator::delete_out_msg_queue_msg(td::ConstBitPtr key) { Ref queue_rec; try { LOG(DEBUG) << "deleting from outbound queue message with key=" << key.to_hex(352); queue_rec = out_msg_queue_->lookup_delete(key, 352); + CHECK(out_msg_queue_size_ > 0); + --out_msg_queue_size_; } catch (vm::VmError err) { LOG(ERROR) << "error deleting from out_msg_queue dictionary: " << err.get_msg(); } @@ -2697,6 +3301,17 @@ bool Collator::precheck_inbound_message(Ref enq_msg, ton::Logical return true; } +/** + * Processes an inbound message from a neighbor's outbound queue. + * The message may create a transaction or be enqueued. + * + * @param enq_msg The inbound message serialized using EnqueuedMsg TLB-scheme. + * @param lt The logical time of the message. + * @param key The 32+64+256-bit key of the message. + * @param src_nb The description of the source neighbor shard. + * + * @returns True if the message was processed successfully, false otherwise. + */ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalTime lt, td::ConstBitPtr key, const block::McShardDescr& src_nb) { ton::LogicalTime enqueued_lt = enq_msg->prefetch_ulong(64); @@ -2800,7 +3415,7 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT // destination is outside our shard, relay transit message // (very similar to enqueue_message()) if (!enqueue_transit_message(std::move(env.msg), std::move(msg_env), cur_prefix, next_prefix, dest_prefix, - std::move(env.fwd_fee_remaining), max_lt)) { + std::move(env.fwd_fee_remaining))) { return fatal_error("cannot enqueue transit internal message with key "s + key.to_hex(352)); } return !our || delete_out_msg_queue_msg(key); @@ -2842,6 +3457,12 @@ bool Collator::process_inbound_message(Ref enq_msg, ton::LogicalT return true; } +/** + * Processes inbound internal messages from message queues of the neighbors. 
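Several functions here index the outbound queue by the 352-bit key mentioned in the @param notes above: 32 bits of next-hop workchain, 64 bits of next-hop account-id prefix, and the 256-bit message hash. The byte-level illustration below assumes big-endian packing and is purely for orientation; the real code writes the key through vm::CellBuilder bit operations.

#include <array>
#include <cstdint>
#include <cstring>

static std::array<std::uint8_t, 44> make_out_queue_key(std::int32_t next_workchain,
                                                       std::uint64_t next_addr_prefix,
                                                       const std::array<std::uint8_t, 32>& msg_hash) {
  std::array<std::uint8_t, 44> key{};  // 352 bits = 44 bytes
  std::uint32_t wc = static_cast<std::uint32_t>(next_workchain);
  for (int i = 0; i < 4; ++i) {
    key[i] = static_cast<std::uint8_t>(wc >> (8 * (3 - i)));
  }
  for (int i = 0; i < 8; ++i) {
    key[4 + i] = static_cast<std::uint8_t>(next_addr_prefix >> (8 * (7 - i)));
  }
  std::memcpy(key.data() + 12, msg_hash.data(), 32);
  return key;
}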
+ * Messages are processed until the normal limit is reached, soft timeout is reached or there are no more messages. + * + * @returns True if the processing was successful, false otherwise. + */ bool Collator::process_inbound_internal_messages() { while (!nb_out_msgs_->is_eof()) { block_full_ = !block_limit_status_->fits(block::ParamLimits::cl_normal); @@ -2890,11 +3511,22 @@ bool Collator::process_inbound_internal_messages() { return true; } +/** + * Processes inbound external messages. + * Messages are processed until the soft limit is reached, medium timeout is reached or there are no more messages. + * + * @returns True if the processing was successful, false otherwise. + */ bool Collator::process_inbound_external_messages() { if (skip_extmsg_) { LOG(INFO) << "skipping processing of inbound external messages"; return true; } + if (out_msg_queue_size_ > SKIP_EXTERNALS_QUEUE_SIZE) { + LOG(INFO) << "skipping processing of inbound external messages because out_msg_queue is too big (" + << out_msg_queue_size_ << " > " << SKIP_EXTERNALS_QUEUE_SIZE << ")"; + return true; + } bool full = !block_limit_status_->fits(block::ParamLimits::cl_soft); for (auto& ext_msg_pair : ext_msg_list_) { if (full) { @@ -2928,7 +3560,17 @@ bool Collator::process_inbound_external_messages() { return true; } -// 1 = processed, 0 = skipped, 3 = processed, all future messages must be skipped (block overflown) +/** + * Processes an external message. + * + * @param msg The message to be processed serialized as Message TLB-scheme. + * + * @returns The result of processing the message: + * -1 if a fatal error occurred. + * 0 if the message is rejected. + * 1 if the message was processed. + * 3 if the message was processed and all future messages must be skipped (block overflown). + */ int Collator::process_external_message(Ref msg) { auto cs = load_cell_slice(msg); td::RefInt256 fwd_fees; @@ -2965,7 +3607,13 @@ int Collator::process_external_message(Ref msg) { return 1; } -// inserts an InMsg into InMsgDescr +/** + * Inserts an InMsg into the block's InMsgDescr. + * + * @param in_msg The input message to be inserted. + * + * @returns True if the insertion is successful, false otherwise. + */ bool Collator::insert_in_msg(Ref in_msg) { if (verbosity > 2) { std::cerr << "InMsg being inserted into InMsgDescr: "; @@ -3000,7 +3648,13 @@ bool Collator::insert_in_msg(Ref in_msg) { ((in_descr_cnt_ & 63) || block_limit_status_->add_cell(in_msg_dict->get_root_cell())); } -// inserts an OutMsg into OutMsgDescr +/** + * Inserts an OutMsg into the block's OutMsgDescr. + * + * @param out_msg The outgoing message to be inserted. + * + * @returns True if the insertion was successful, false otherwise. + */ bool Collator::insert_out_msg(Ref out_msg) { if (verbosity > 2) { std::cerr << "OutMsg being inserted into OutMsgDescr: "; @@ -3023,6 +3677,14 @@ bool Collator::insert_out_msg(Ref out_msg) { return insert_out_msg(std::move(out_msg), msg->get_hash().bits()); } +/** + * Inserts an outgoing message into the block's OutMsgDescr dictionary. + * + * @param out_msg The outgoing message to be inserted. + * @param msg_hash The 256-bit hash of the outgoing message. + * + * @returns True if the insertion was successful, false otherwise. 
+ */ bool Collator::insert_out_msg(Ref out_msg, td::ConstBitPtr msg_hash) { bool ok; try { @@ -3039,7 +3701,15 @@ bool Collator::insert_out_msg(Ref out_msg, td::ConstBitPtr msg_hash) { ((out_descr_cnt_ & 63) || block_limit_status_->add_cell(out_msg_dict->get_root_cell())); } -// enqueues a new Message into OutMsgDescr and OutMsgQueue +/** + * Enqueues a new message into the block's outbound message queue and OutMsgDescr. + * + * @param msg The new outbound message to enqueue. + * @param fwd_fees_remaining The remaining forward fees for the message. + * @param enqueued_lt The logical time at which the message is enqueued. + * + * @returns True if the message was successfully enqueued, false otherwise. + */ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_remaining, ton::LogicalTime enqueued_lt) { // 0. unpack src_addr and dest_addr block::gen::CommonMsgInfo::Record_int_msg_info info; @@ -3094,6 +3764,7 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema LOG(DEBUG) << "inserting into outbound queue a new message with (lt,key)=(" << start_lt << "," << key.to_hex() << ")"; ok = out_msg_queue_->set_builder(key.bits(), 352, cb, vm::Dictionary::SetMode::Add); + ++out_msg_queue_size_; } catch (vm::VmError) { ok = false; } @@ -3104,6 +3775,13 @@ bool Collator::enqueue_message(block::NewOutMsg msg, td::RefInt256 fwd_fees_rema return register_out_msg_queue_op(); } +/** + * Processes new messages that were generated in this block. + * + * @param enqueue_only If true, only enqueue the new messages without creating transactions. + * + * @returns True if all new messages were processed successfully, false otherwise. + */ bool Collator::process_new_messages(bool enqueue_only) { while (!new_msgs.empty()) { block::NewOutMsg msg = new_msgs.top(); @@ -3125,6 +3803,11 @@ bool Collator::process_new_messages(bool enqueue_only) { return true; } +/** + * Registers a new output message. + * + * @param new_msg The new output message to be registered. + */ void Collator::register_new_msg(block::NewOutMsg new_msg) { if (new_msg.lt < min_new_msg_lt) { min_new_msg_lt = new_msg.lt; @@ -3133,6 +3816,11 @@ void Collator::register_new_msg(block::NewOutMsg new_msg) { block_limit_status_->extra_out_msgs++; } +/** + * Registers new messages that were created in the transaction. + * + * @param trans The transaction containing the messages. + */ void Collator::register_new_msgs(block::transaction::Transaction& trans) { CHECK(trans.root.not_null()); for (unsigned i = 0; i < trans.out_msgs.size(); i++) { @@ -3146,6 +3834,15 @@ void Collator::register_new_msgs(block::transaction::Transaction& trans) { * */ +/** + * Stores an external block reference to a CellBuilder object. + * + * @param cb The CellBuilder object to store the reference in. + * @param id_ext The block ID. + * @param end_lt The end logical time of the block. + * + * @returns True if the reference was successfully stored, false otherwise. + */ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, ton::LogicalTime end_lt) { return cb.store_long_bool(end_lt, 64) // end_lt:uint64 && cb.store_long_bool(id_ext.seqno(), 32) // seq_no:uint32 @@ -3153,6 +3850,15 @@ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, to && cb.store_bits_bool(id_ext.file_hash); // file_hash:bits256 } +/** + * Stores an external block reference to a CellBuilder. + * + * @param cb The CellBuilder to store the reference in. + * @param id_ext The block ID. 
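store_ext_blk_ref_to() above serializes an ExtBlkRef: a 64-bit end_lt, a 32-bit seqno, then the 256-bit root and file hashes, 608 data bits in total. A plain-struct equivalent for reference, mirroring the TLB constructor ext_blk_ref$_ end_lt:uint64 seq_no:uint32 root_hash:bits256 file_hash:bits256 = ExtBlkRef:

#include <array>
#include <cstdint>

struct ExtBlkRef {
  std::uint64_t end_lt;
  std::uint32_t seq_no;
  std::array<std::uint8_t, 32> root_hash;
  std::array<std::uint8_t, 32> file_hash;
};

static_assert(64 + 32 + 256 + 256 == 608, "ExtBlkRef occupies 608 data bits in the cell");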
+ * @param blk_root The root of the block. + * + * @returns True if the reference was successfully stored, false otherwise. + */ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, Ref blk_root) { block::gen::Block::Record rec; block::gen::BlockInfo::Record info; @@ -3167,6 +3873,19 @@ bool store_ext_blk_ref_to(vm::CellBuilder& cb, const ton::BlockIdExt& id_ext, Re && store_ext_blk_ref_to(cb, id_ext, info.end_lt); // store } +/** + * Updates one shard description in the masterchain shard configuration. + * Used in masterchain collator. + * + * @param info The shard information to be updated. + * @param sibling The sibling shard information. + * @param wc_info The workchain information. + * @param now The current Unix time. + * @param ccvc The Catchain validators configuration. + * @param update_cc Flag indicating whether to update the Catchain seqno. + * + * @returns A boolean value indicating whether the shard description has changed. + */ static int update_one_shard(block::McShardHash& info, const block::McShardHash* sibling, const block::WorkchainInfo* wc_info, ton::UnixTime now, const block::CatchainValidatorsConfig& ccvc, bool update_cc) { @@ -3219,6 +3938,16 @@ static int update_one_shard(block::McShardHash& info, const block::McShardHash* return changed; } +/** + * Updates the shard configuration in the masterchain. + * Used in masterchain collator. + * + * @param wc_set The set of workchains. + * @param ccvc The Catchain validators configuration. + * @param update_cc A boolean indicating whether to update the Catchain seqno. + * + * @returns True if the shard configuration was successfully updated, false otherwise. + */ bool Collator::update_shard_config(const block::WorkchainSet& wc_set, const block::CatchainValidatorsConfig& ccvc, bool update_cc) { LOG(DEBUG) << "updating shard configuration (update_cc=" << update_cc << ")"; @@ -3245,6 +3974,12 @@ bool Collator::update_shard_config(const block::WorkchainSet& wc_set, const bloc }); } +/** + * Creates McStateExtra. + * Used in masterchain collator. + * + * @returns True if the creation is successful, false otherwise. + */ bool Collator::create_mc_state_extra() { if (!is_masterchain()) { CHECK(mc_state_extra_.is_null()); @@ -3463,6 +4198,16 @@ bool Collator::create_mc_state_extra() { return true; } +/** + * Updates the `block_creator_stats_` for a given key. + * Used in masterchain collator. + * + * @param key The 256-bit key of the creator. + * @param shard_incr The increment value for the shardchain block counter. + * @param mc_incr The increment value for the masterchain block counter. + * + * @returns True if the block creator count was successfully updated, false otherwise. + */ bool Collator::update_block_creator_count(td::ConstBitPtr key, unsigned shard_incr, unsigned mc_incr) { LOG(DEBUG) << "increasing CreatorStats for " << key.to_hex(256) << " by (" << mc_incr << ", " << shard_incr << ")"; block::DiscountedCounter mc_cnt, shard_cnt; @@ -3489,6 +4234,17 @@ bool Collator::update_block_creator_count(td::ConstBitPtr key, unsigned shard_in return true; } +/** + * Determines if the creator count is outdated for a given key. + * Used in masterchain collator. + * + * @param key The key of the creator. + * @param cs The CellSlice containing the CreatorStats. + * + * @returns -1 if there was a fatal error. + * 0 if the CreatorStats should be removed as they are stale, + * 1 if the CreatorStats are still valid. 
+ */ int Collator::creator_count_outdated(td::ConstBitPtr key, vm::CellSlice& cs) { block::DiscountedCounter mc_cnt, shard_cnt; if (!(block::fetch_CreatorStats(cs, mc_cnt, shard_cnt) && cs.empty_ext())) { @@ -3507,6 +4263,11 @@ int Collator::creator_count_outdated(td::ConstBitPtr key, vm::CellSlice& cs) { } } +/** + * Updates `block_create_stats_` using information about creators of all new blocks. + * + * @returns True if the update was successful, false otherwise. + */ bool Collator::update_block_creator_stats() { if (!create_stats_enabled_) { return true; @@ -3557,10 +4318,25 @@ bool Collator::update_block_creator_stats() { return cnt >= 0; } +/** + * Retrieves the global masterchain config from the config contract. + * + * @param cfg_addr The address of the configuration smart contract. + * + * @returns A Result object containing a reference to the configuration data. + */ td::Result> Collator::get_config_data_from_smc(const ton::StdSmcAddress& cfg_addr) { return block::get_config_data_from_smc(account_dict->lookup_ref(cfg_addr)); } +/** + * Fetches and validates a new configuration from the configuration smart contract. + * + * @param cfg_addr The address of the configuration smart contract. + * @param new_config A reference to a vm::Cell object to store the new configuration. + * + * @returns True if the new configuration was successfully fetched, false otherwise. + */ bool Collator::try_fetch_new_config(const ton::StdSmcAddress& cfg_addr, Ref& new_config) { auto cfg_res = get_config_data_from_smc(cfg_addr); if (cfg_res.is_error()) { @@ -3578,12 +4354,26 @@ bool Collator::try_fetch_new_config(const ton::StdSmcAddress& cfg_addr, Refestimate_block_size(); @@ -3593,14 +4383,32 @@ bool Collator::check_block_overload() { << " collated_size_estimate=" << block_limit_status_->collated_data_stat.estimate_proof_size(); auto cl = block_limit_status_->classify(); if (cl <= block::ParamLimits::cl_underload) { - underload_history_ |= 1; - LOG(INFO) << "block is underloaded"; + if (out_msg_queue_size_ > MERGE_MAX_QUEUE_SIZE) { + LOG(INFO) + << "block is underloaded, but don't set underload history because out_msg_queue size is too big to merge (" + << out_msg_queue_size_ << " > " << MERGE_MAX_QUEUE_SIZE << ")"; + } else { + underload_history_ |= 1; + LOG(INFO) << "block is underloaded"; + } } else if (cl >= block::ParamLimits::cl_soft) { - overload_history_ |= 1; - LOG(INFO) << "block is overloaded (category " << cl << ")"; + if (out_msg_queue_size_ > SPLIT_MAX_QUEUE_SIZE) { + LOG(INFO) << "block is overloaded (category " << cl + << "), but don't set overload history because out_msg_queue size is too big to split (" + << out_msg_queue_size_ << " > " << SPLIT_MAX_QUEUE_SIZE << ")"; + } else { + overload_history_ |= 1; + LOG(INFO) << "block is overloaded (category " << cl << ")"; + } } else { LOG(INFO) << "block is loaded normally"; } + if (!(overload_history_ & 1) && out_msg_queue_size_ >= FORCE_SPLIT_QUEUE_SIZE && + out_msg_queue_size_ <= SPLIT_MAX_QUEUE_SIZE) { + overload_history_ |= 1; + LOG(INFO) << "setting overload history because out_msg_queue reached force split limit (" << out_msg_queue_size_ + << " >= " << FORCE_SPLIT_QUEUE_SIZE << ")"; + } if (collator_settings & 1) { LOG(INFO) << "want_split manually set"; want_split_ = true; @@ -3623,6 +4431,16 @@ bool Collator::check_block_overload() { return true; } +/** + * Processes removing a library from the collection of public libraries of an account. + * Updates the global collection of public libraries. + * Used in masterchain collator. 
+ * + * @param key The 256-bit key of the public library to remove. + * @param addr The 256-bit address of the account where the library is removed. + * + * @returns True if the public library was successfully removed, false otherwise. + */ bool Collator::remove_public_library(td::ConstBitPtr key, td::ConstBitPtr addr) { LOG(INFO) << "Removing public library " << key.to_hex(256) << " of account " << addr.to_hex(256); auto val = shard_libraries_->lookup(key, 256); @@ -3666,6 +4484,17 @@ bool Collator::remove_public_library(td::ConstBitPtr key, td::ConstBitPtr addr) return true; } +/** + * Processes adding a library to the collection of public libraries of an account. + * Updates the global collection of public libraries. + * Used in masterchain collator. + * + * @param key The key of the public library. + * @param addr The address of the account where the library is added. + * @param library The root cell of the library. + * + * @returns True if the public library was successfully added, false otherwise. + */ bool Collator::add_public_library(td::ConstBitPtr key, td::ConstBitPtr addr, Ref library) { LOG(INFO) << "Adding public library " << key.to_hex(256) << " of account " << addr.to_hex(256); CHECK(library.not_null() && !library->get_hash().bits().compare(key, 256)); @@ -3704,6 +4533,17 @@ bool Collator::add_public_library(td::ConstBitPtr key, td::ConstBitPtr addr, Ref return true; } +/** + * Processes changes in libraries of an account. + * Updates the global collection of public libraries. + * Used in masterchain collator. + * + * @param orig_libs The original libraries of the account. + * @param final_libs The final libraries of the account. + * @param addr The address associated with the account. + * + * @returns True if the update was successful, false otherwise. + */ bool Collator::update_account_public_libraries(Ref orig_libs, Ref final_libs, const td::Bits256& addr) { vm::Dictionary dict1{std::move(orig_libs), 256}, dict2{std::move(final_libs), 256}; @@ -3721,6 +4561,13 @@ bool Collator::update_account_public_libraries(Ref orig_libs, Ref msg_q_info; vm::CellBuilder cb, cb2; @@ -3815,11 +4682,20 @@ bool Collator::create_shard_state() { return true; } -// stores BlkMasterInfo (for non-masterchain blocks) +/** + * Stores BlkMasterInfo (for non-masterchain blocks) in the provided CellBuilder. + * + * @param cb The CellBuilder to store the reference in. + * + * @returns True if the reference is successfully stored, false otherwise. + */ bool Collator::store_master_ref(vm::CellBuilder& cb) { return mc_block_root.not_null() && store_ext_blk_ref_to(cb, mc_block_id_, mc_block_root); } +/** + * Updates the processed_upto information for the new block based on the information on the last processed inbound message. + */ bool Collator::update_processed_upto() { auto ref_mc_seqno = is_masterchain() ? new_block_seqno : prev_mc_block_seqno; update_min_mc_seqno(ref_mc_seqno); @@ -3834,6 +4710,13 @@ bool Collator::update_processed_upto() { return processed_upto_->compactify(); } +/** + * Computes the outbound message queue. + * + * @param out_msg_queue_info A reference to a vm::Cell object to store the computed queue. + * + * @returns True if the computation is successful, False otherwise. 
+ */ bool Collator::compute_out_msg_queue_info(Ref& out_msg_queue_info) { if (verbosity >= 2) { auto rt = out_msg_queue_->get_root(); @@ -3848,6 +4731,11 @@ bool Collator::compute_out_msg_queue_info(Ref& out_msg_queue_info) { && cb.finalize_to(out_msg_queue_info); } +/** + * Computes the total balance of the shard state. + * + * @returns True if the total balance computation is successful, false otherwise. + */ bool Collator::compute_total_balance() { // 1. compute total_balance_ from the augmentation value of ShardAccounts auto accounts_extra = account_dict->get_root_extra(); @@ -3898,6 +4786,13 @@ bool Collator::compute_total_balance() { return true; } +/** + * Creates BlockInfo of the new block. + * + * @param block_info A reference to the cell to put the serialized info to. + * + * @returns True if the block info cell was successfully created, false otherwise. + */ bool Collator::create_block_info(Ref& block_info) { vm::CellBuilder cb, cb2; bool mc = is_masterchain(); @@ -3935,10 +4830,24 @@ bool Collator::create_block_info(Ref& block_info) { && cb.finalize_to(block_info); } +/** + * Stores the version information in a CellBuilder. + * + * @param cb The CellBuilder object to store the version information. + * + * @returns True if the version information was successfully stored, false otherwise. + */ bool Collator::store_version(vm::CellBuilder& cb) const { return block::gen::t_GlobalVersion.pack_capabilities(cb, supported_version(), supported_capabilities()); } +/** + * Stores the zero state reference in the given CellBuilder. + * + * @param cb The CellBuilder to store the zero state reference in. + * + * @returns True if the zero state reference is successfully stored, false otherwise. + */ bool Collator::store_zero_state_ref(vm::CellBuilder& cb) { CHECK(prev_state_root_.not_null()); RootHash root_hash = prev_state_root_->get_hash().bits(); @@ -3951,6 +4860,14 @@ bool Collator::store_zero_state_ref(vm::CellBuilder& cb) { && cb.store_bits_bool(prev_blocks[0].file_hash); // file_hash:bits256 } +/** + * Stores the previous block references to the given CellBuilder. + * + * @param cb The CellBuilder object to store the references. + * @param is_after_merge A boolean indicating whether the new block after a merge. + * + * @returns True if the references are successfully stored, false otherwise. + */ bool Collator::store_prev_blk_ref(vm::CellBuilder& cb, bool is_after_merge) { if (is_after_merge) { auto root2 = prev_block_data.at(1)->root_cell(); @@ -3968,6 +4885,11 @@ bool Collator::store_prev_blk_ref(vm::CellBuilder& cb, bool is_after_merge) { } } +/** + * Validates the value flow of the block. + * + * @returns True if the value flow is correct, false otherwise. + */ bool Collator::check_value_flow() { if (!value_flow_.validate()) { LOG(ERROR) << "incorrect value flow in new block : " << value_flow_.to_str(); @@ -3977,6 +4899,13 @@ bool Collator::check_value_flow() { return true; } +/** + * Creates the BlockExtra of the new block. + * + * @param block_extra A reference to the cell to put the serialized info to. + * + * @returns True if the block extra data was successfully created, false otherwise. + */ bool Collator::create_block_extra(Ref& block_extra) { bool mc = is_masterchain(); Ref mc_block_extra; @@ -3992,6 +4921,14 @@ bool Collator::create_block_extra(Ref& block_extra) { && cb.finalize_to(block_extra); // = BlockExtra; } +/** + * Creates the McBlockExtra of the new masterchain block. + * Used in masterchain collator. 
+ * + * @param mc_block_extra A reference to the cell to put the serialized info to. + * + * @returns True if the extra data was successfully created, false otherwise. + */ bool Collator::create_mc_block_extra(Ref& mc_block_extra) { if (!is_masterchain()) { return false; @@ -4009,6 +4946,18 @@ bool Collator::create_mc_block_extra(Ref& mc_block_extra) { && cb.finalize_to(mc_block_extra); // = McBlockExtra } +/** + * Serializes the new block. + * + * This function performs the following steps: + * 1. Creates a BlockInfo for the new block. + * 2. Checks the value flow for the new block. + * 3. Creates a BlockExtra for the new block. + * 4. Builds a new block using the created BlockInfo, value flow, state update, and BlockExtra. + * 5. Verifies the new block if the verification is enabled. + * + * @returns True if the new block is successfully created, false otherwise. + */ bool Collator::create_block() { Ref block_info, extra; if (!create_block_info(block_info)) { @@ -4047,6 +4996,15 @@ bool Collator::create_block() { return true; } +/** + * Collates the shard block description set. + * Used in masterchain collator. + * + * This function creates a dictionary and populates it with the shard block descriptions. + * + * @returns A `Ref` containing the serialized `TopBlockDescrSet` record. + * If serialization fails, an empty `Ref` is returned. + */ Ref Collator::collate_shard_block_descr_set() { vm::Dictionary dict{96}; for (const auto& descr : used_shard_block_descr_) { @@ -4099,6 +5057,11 @@ bool Collator::prepare_msg_queue_proof() { return res; } +/** + * Creates collated data for the block. + * + * @returns True if the collated data was successfully created, false otherwise. + */ bool Collator::create_collated_data() { // 1. store the set of used shard block descriptions if (!used_shard_block_descr_.empty()) { @@ -4155,6 +5118,18 @@ bool Collator::create_collated_data() { return true; } +/** + * Creates a block candidate for the Collator. + * + * This function serializes the new block and collated data, and creates a BlockCandidate object + * with the necessary information. It then checks if the size of the block candidate exceeds the + * limits specified in the consensus configuration. + * + * Finally, the block candidate is saved to the disk. + * If there are any bad external messages or delayed external messages, the ValidatorManager is called to handle them. + * + * @returns True if the block candidate was created successfully, false otherwise. + */ bool Collator::create_block_candidate() { // 1. serialize block LOG(INFO) << "serializing new Block"; @@ -4235,6 +5210,11 @@ bool Collator::create_block_candidate() { return true; } +/** + * Returns a block candidate to the Promise. + * + * @param saved The result of saving the block candidate to the disk. + */ void Collator::return_block_candidate(td::Result saved) { // 6.
return data to the original "caller" if (saved.is_error()) { @@ -4243,8 +5223,8 @@ void Collator::return_block_candidate(td::Result saved) { fatal_error(std::move(err)); } else { CHECK(block_candidate); - LOG(INFO) << "sending new BlockCandidate to Promise"; - LOG(INFO) << "collation took " << perf_timer_.elapsed() << " s"; + LOG(WARNING) << "sending new BlockCandidate to Promise"; + LOG(WARNING) << "collation took " << perf_timer_.elapsed() << " s"; main_promise(block_candidate->clone()); busy_ = false; stop(); @@ -4257,6 +5237,18 @@ void Collator::return_block_candidate(td::Result saved) { * */ +/** + * Registers an external message to the list of external messages in the Collator. + * + * @param ext_msg The reference to the external message cell. + * @param ext_hash The hash of the external message. + * + * @returns Result indicating the success or failure of the registration. + * - If the external message is invalid, returns an error. + * - If the external message has been previously rejected, returns an error. + * - If the external message has been previously registered and accepted, returns false. + * - Otherwise returns true. + */ td::Result Collator::register_external_message_cell(Ref ext_msg, const ExtMessage::Hash& ext_hash) { if (ext_msg->get_level() != 0) { return td::Status::Error("external message must have zero level"); @@ -4305,23 +5297,11 @@ td::Result Collator::register_external_message_cell(Ref ext_msg, return true; } -/* -td::Result Collator::register_external_message(td::Slice ext_msg_boc) { - if (ext_msg_boc.size() > max_ext_msg_size) { - return td::Status::Error("external message too large, rejecting"); - } - vm::BagOfCells boc; - auto res = boc.deserialize(ext_msg_boc); - if (res.is_error()) { - return res.move_as_error(); - } - if (boc.get_root_count() != 1) { - return td::Status::Error("external message is not a valid bag of cells"); // not a valid bag-of-Cells - } - return register_external_message_cell(boc.get_root_cell(0)); -} -*/ - +/** + * Callback function called after retrieving external messages. + * + * @param res The result of the external message retrieval operation.
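A standalone analogue (an assumption for illustration, not the Collator API) of the tri-state result documented for register_external_message_cell() above: a hard error for invalid or previously rejected messages, false for duplicates, true for newly registered ones.

#include <optional>
#include <string>
#include <unordered_set>

// std::nullopt -> error (previously rejected; an invalid message would be handled the same way),
// false -> already registered and accepted, true -> newly registered.
std::optional<bool> register_message(std::unordered_set<std::string>& registered,
                                     const std::unordered_set<std::string>& rejected,
                                     const std::string& hash) {
  if (rejected.count(hash)) {
    return std::nullopt;                   // previously rejected: report an error
  }
  return registered.insert(hash).second;   // false if it was already registered
}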
+ */ void Collator::after_get_external_messages(td::Result>> res) { --pending; if (res.is_error()) { @@ -4342,47 +5322,11 @@ void Collator::after_get_external_messages(td::Resulthash()); } } + LOG(WARNING) << "got " << vect.size() << " external messages from mempool, " << bad_ext_msgs_.size() + << " bad messages"; check_pending(); } -td::Result Collator::register_ihr_message_cell(Ref ihr_msg) { - return false; -} - -td::Result Collator::register_ihr_message(td::Slice ihr_msg_boc) { - if (ihr_msg_boc.size() > max_ihr_msg_size) { - return td::Status::Error("IHR message too large, rejecting"); - } - vm::BagOfCells boc; - auto res = boc.deserialize(ihr_msg_boc); - if (res.is_error()) { - return res.move_as_error(); - } - if (boc.get_root_count() != 1) { - return td::Status::Error("IHR message is not a valid bag of cells"); // not a valid bag-of-Cells - } - return register_ihr_message_cell(boc.get_root_cell(0)); -} - -td::Result Collator::register_shard_signatures_cell(Ref signatures) { - return false; -} - -td::Result Collator::register_shard_signatures(td::Slice signatures_boc) { - if (signatures_boc.size() > max_blk_sign_size) { - return td::Status::Error("Shardchain signatures block too large, rejecting"); - } - vm::BagOfCells boc; - auto res = boc.deserialize(signatures_boc); - if (res.is_error()) { - return res.move_as_error(); - } - if (boc.get_root_count() != 1) { - return td::Status::Error("Shardchain signatures block is not a valid bag of cells"); // not a valid bag-of-Cells - } - return register_shard_signatures_cell(boc.get_root_cell(0)); -} - } // namespace validator } // namespace ton diff --git a/validator/impl/external-message.cpp b/validator/impl/external-message.cpp index 9383e734..073e7360 100644 --- a/validator/impl/external-message.cpp +++ b/validator/impl/external-message.cpp @@ -102,7 +102,7 @@ void ExtMessageQ::run_message(td::BufferSlice data, block::SizeLimitsConfig::Ext run_fetch_account_state( wc, addr, manager, - [promise = std::move(promise), msg_root = root, wc, + [promise = std::move(promise), msg_root = root, wc, addr, M](td::Result, UnixTime, LogicalTime, std::unique_ptr>> res) mutable { if (res.is_error()) { @@ -114,7 +114,8 @@ void ExtMessageQ::run_message(td::BufferSlice data, block::SizeLimitsConfig::Ext auto utime = std::get<1>(tuple); auto lt = std::get<2>(tuple); auto config = std::move(std::get<3>(tuple)); - if (!acc.unpack(shard_acc, {}, utime, false)) { + bool special = wc == masterchainId && config->is_special_smartcontract(addr); + if (!acc.unpack(shard_acc, utime, special)) { promise.set_error(td::Status::Error(PSLICE() << "Failed to unpack account state")); } else { auto status = run_message_on_account(wc, &acc, utime, lt + 1, msg_root, std::move(config)); @@ -155,6 +156,7 @@ td::Status ExtMessageQ::run_message_on_account(ton::WorkchainId wc, } compute_phase_cfg_.libraries = std::make_unique(config->get_libraries_root(), 256); compute_phase_cfg_.with_vm_log = true; + compute_phase_cfg_.stop_on_accept_message = true; auto res = Collator::impl_create_ordinary_transaction(msg_root, acc, utime, lt, &storage_phase_cfg_, &compute_phase_cfg_, diff --git a/validator/impl/fabric.cpp b/validator/impl/fabric.cpp index a5c52f7c..fae29371 100644 --- a/validator/impl/fabric.cpp +++ b/validator/impl/fabric.cpp @@ -39,8 +39,9 @@ namespace ton { namespace validator { -td::actor::ActorOwn create_db_actor(td::actor::ActorId manager, std::string db_root_) { - return td::actor::create_actor("db", manager, db_root_); +td::actor::ActorOwn 
create_db_actor(td::actor::ActorId manager, std::string db_root_, + td::Ref opts) { + return td::actor::create_actor("db", manager, db_root_, opts); } td::actor::ActorOwn create_liteserver_cache_actor(td::actor::ActorId manager, @@ -202,10 +203,12 @@ void run_validate_query(ShardIdFull shard, BlockIdExt min_masterchain_block_id, } } bool is_fake = mode & ValidateMode::fake; - td::actor::create_actor( - PSTRING() << (is_fake ? "fakevalidate" : "validateblock") << shard.to_str() << ":" << (seqno + 1), shard, - min_masterchain_block_id, std::move(prev), std::move(candidate), std::move(validator_set), std::move(manager), - timeout, std::move(promise), mode) + static std::atomic idx; + td::actor::create_actor(PSTRING() << (is_fake ? "fakevalidate" : "validateblock") << shard.to_str() + << ":" << (seqno + 1) << "#" << idx.fetch_add(1), + shard, min_masterchain_block_id, std::move(prev), std::move(candidate), + std::move(validator_set), std::move(manager), timeout, std::move(promise), + mode) .release(); } diff --git a/validator/impl/liteserver.cpp b/validator/impl/liteserver.cpp index 9c7a0456..34b9f1f1 100644 --- a/validator/impl/liteserver.cpp +++ b/validator/impl/liteserver.cpp @@ -505,20 +505,7 @@ void LiteQuery::perform_sendMessage(td::BufferSlice data) { } void LiteQuery::get_block_handle_checked(BlockIdExt blkid, td::Promise promise) { - auto P = td::PromiseCreator::lambda( - [promise = std::move(promise)](td::Result R) mutable { - if (R.is_error()) { - promise.set_error(R.move_as_error()); - } else { - auto handle = R.move_as_ok(); - if (handle->is_applied()) { - promise.set_result(std::move(handle)); - } else { - promise.set_error(td::Status::Error(ErrorCode::notready, "block is not applied")); - } - } - }); - td::actor::send_closure(manager_, &ValidatorManager::get_block_handle, blkid, false, std::move(P)); + td::actor::send_closure(manager_, &ValidatorManager::get_block_handle_for_litequery, blkid, std::move(promise)); } bool LiteQuery::request_mc_block_data(BlockIdExt blkid) { @@ -1047,7 +1034,8 @@ bool LiteQuery::make_state_root_proof(Ref& proof, Ref state_ vm::MerkleProofBuilder pb{std::move(block_root)}; block::gen::Block::Record blk; block::gen::BlockInfo::Record info; - if (!(tlb::unpack_cell(pb.root(), blk) && tlb::unpack_cell(blk.info, info))) { + if (!(tlb::unpack_cell(pb.root(), blk) && tlb::unpack_cell(blk.info, info) && + block::gen::BlkPrevInfo(info.after_merge).validate_ref(info.prev_ref))) { return fatal_error("cannot unpack block header"); } vm::CellSlice upd_cs{vm::NoVmSpec(), blk.state_update}; @@ -1497,17 +1485,12 @@ void LiteQuery::continue_getTransactions(unsigned remaining, bool exact) { LOG(DEBUG) << "sending get_block_by_lt_from_db() query to manager for " << acc_workchain_ << ":" << acc_addr_.to_hex() << " " << trans_lt_; td::actor::send_closure_later( - manager_, &ValidatorManager::get_block_by_lt_from_db, ton::extract_addr_prefix(acc_workchain_, acc_addr_), + manager_, &ValidatorManager::get_block_by_lt_from_db_for_litequery, ton::extract_addr_prefix(acc_workchain_, acc_addr_), trans_lt_, [Self = actor_id(this), remaining, manager = manager_](td::Result res) { if (res.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_getTransactions, res.move_as_error(), ton::BlockIdExt{}); } else { auto handle = res.move_as_ok(); - if (!handle->is_applied()) { - td::actor::send_closure(Self, &LiteQuery::abort_getTransactions, td::Status::Error(ErrorCode::notready, "block is not applied"), - ton::BlockIdExt{}); - return; - } LOG(DEBUG) << "requesting data for 
block " << handle->id().to_str(); td::actor::send_closure_later(manager, &ValidatorManager::get_block_data_from_db, handle, [Self, blkid = handle->id(), remaining](td::Result> res) { @@ -1846,10 +1829,6 @@ void LiteQuery::perform_lookupBlock(BlockId blkid, int mode, LogicalTime lt, Uni td::actor::send_closure(Self, &LiteQuery::abort_query, res.move_as_error()); } else { auto handle = res.move_as_ok(); - if (!handle->is_applied()) { - td::actor::send_closure(Self, &LiteQuery::abort_query, td::Status::Error(ErrorCode::notready, "block is not applied")); - return; - } LOG(DEBUG) << "requesting data for block " << handle->id().to_str(); td::actor::send_closure_later(manager, &ValidatorManager::get_block_data_from_db, handle, [Self, blkid = handle->id(), mode](td::Result> res) { @@ -1865,13 +1844,14 @@ void LiteQuery::perform_lookupBlock(BlockId blkid, int mode, LogicalTime lt, Uni ton::AccountIdPrefixFull pfx{blkid.workchain, blkid.shard}; if (mode & 2) { - td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_lt_from_db, pfx, lt, std::move(P)); + td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_lt_from_db_for_litequery, pfx, lt, + std::move(P)); } else if (mode & 4) { - td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_unix_time_from_db, pfx, utime, + td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_unix_time_from_db_for_litequery, pfx, utime, std::move(P)); } else { - td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_seqno_from_db, pfx, blkid.seqno, - std::move(P)); + td::actor::send_closure_later(manager_, &ValidatorManager::get_block_by_seqno_from_db_for_litequery, pfx, + blkid.seqno, std::move(P)); } } @@ -2629,7 +2609,7 @@ void LiteQuery::perform_getShardBlockProof(BlockIdExt blkid) { } AccountIdPrefixFull pfx{masterchainId, shardIdAll}; td::actor::send_closure_later( - manager, &ValidatorManager::get_block_by_seqno_from_db, pfx, handle->masterchain_ref_block(), + manager, &ValidatorManager::get_block_by_seqno_from_db_for_litequery, pfx, handle->masterchain_ref_block(), [Self, manager](td::Result R) { if (R.is_error()) { td::actor::send_closure(Self, &LiteQuery::abort_query, R.move_as_error()); diff --git a/validator/impl/validate-query.cpp b/validator/impl/validate-query.cpp index 7ae4bbff..e1ca9386 100644 --- a/validator/impl/validate-query.cpp +++ b/validator/impl/validate-query.cpp @@ -40,6 +40,11 @@ namespace validator { using td::Ref; using namespace std::literals::string_literals; +/** + * Converts the error context to a string representation to show it in case of validation error. + * + * @returns The error context as a string. + */ std::string ErrorCtx::as_string() const { std::string a; for (const auto& s : entries_) { @@ -49,6 +54,20 @@ std::string ErrorCtx::as_string() const { return a; } +/** + * Constructs a ValidateQuery object. + * + * @param shard The shard of the block being validated. + * @param min_ts The minimum allowed UnixTime for the block. + * @param min_masterchain_block_id The minimum allowed masterchain block reference for the block. + * @param prev A vector of BlockIdExt representing the previous blocks. + * @param candidate The BlockCandidate to be validated. + * @param validator_set A reference to the ValidatorSet. + * @param manager The ActorId of the ValidatorManager. + * @param timeout The timeout for the validation. + * @param promise The Promise to return the ValidateCandidateResult to. 
+ * @param is_fake A boolean indicating if the validation is fake (performed when creating a hardfork). + */ ValidateQuery::ValidateQuery(ShardIdFull shard, BlockIdExt min_masterchain_block_id, std::vector prev, BlockCandidate candidate, Ref validator_set, td::actor::ActorId manager, td::Timestamp timeout, @@ -71,14 +90,30 @@ ValidateQuery::ValidateQuery(ShardIdFull shard, BlockIdExt min_masterchain_block }) { } +/** + * Raises an error when timeout is reached. + */ void ValidateQuery::alarm() { abort_query(td::Status::Error(ErrorCode::timeout, "timeout")); } +/** + * Aborts the validation with the given error. + * + * @param error The error encountered. + */ void ValidateQuery::abort_query(td::Status error) { (void)fatal_error(std::move(error)); } +/** + * Rejects the validation and logs an error message. + * + * @param error The error message to be logged. + * @param reason The reason for rejecting the validation. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::reject_query(std::string error, td::BufferSlice reason) { error = error_ctx() + error; LOG(ERROR) << "REJECT: aborting validation of block candidate for " << shard_.to_str() << " : " << error; @@ -95,11 +130,28 @@ bool ValidateQuery::reject_query(std::string error, td::BufferSlice reason) { return false; } +/** + * Rejects the validation and logs an error message. + * + * @param err_msg The error message to be displayed. + * @param error The error status. + * @param reason The reason for rejecting the query. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::reject_query(std::string err_msg, td::Status error, td::BufferSlice reason) { error.ensure_error(); return reject_query(err_msg + " : " + error.to_string(), std::move(reason)); } +/** + * Rejects the validation and logs an error message. + * + * @param error The error message to be logged. + * @param reason The reason for rejecting the validation. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::soft_reject_query(std::string error, td::BufferSlice reason) { error = error_ctx() + error; LOG(ERROR) << "SOFT REJECT: aborting validation of block candidate for " << shard_.to_str() << " : " << error; @@ -116,6 +168,13 @@ bool ValidateQuery::soft_reject_query(std::string error, td::BufferSlice reason) return false; } +/** + * Handles a fatal error during validation. + * + * @param error The error status. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(td::Status error) { error.ensure_error(); LOG(ERROR) << "aborting validation of block candidate for " << shard_.to_str() << " : " << error.to_string(); @@ -135,22 +194,51 @@ bool ValidateQuery::fatal_error(td::Status error) { return false; } +/** + * Handles a fatal error during validation. + * + * @param err_code Error code. + * @param error Error message. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(int err_code, std::string err_msg) { return fatal_error(td::Status::Error(err_code, error_ctx() + err_msg)); } +/** + * Handles a fatal error during validation. + * + * @param err_code Error code. + * @param err_msg Error message. + * @param error Error status. + * + * @returns False indicating that the validation failed. 
+ */ bool ValidateQuery::fatal_error(int err_code, std::string err_msg, td::Status error) { error.ensure_error(); return fatal_error(err_code, err_msg + " : " + error.to_string()); } +/** + * Handles a fatal error during validation. + * + * @param err_msg Error message. + * @param err_code Error code. + * + * @returns False indicating that the validation failed. + */ bool ValidateQuery::fatal_error(std::string err_msg, int err_code) { return fatal_error(td::Status::Error(err_code, error_ctx() + err_msg)); } +/** + * Finishes the query and sends the result to the promise. + */ void ValidateQuery::finish_query() { if (main_promise) { - LOG(INFO) << "validation took " << perf_timer_.elapsed() << " s"; + LOG(WARNING) << "validate query done"; + LOG(WARNING) << "validation took " << perf_timer_.elapsed() << " s"; main_promise.set_result(now_); } stop(); @@ -162,8 +250,14 @@ void ValidateQuery::finish_query() { * */ +/** + * Starts the validation process. + * + * This function performs various checks on the validation parameters and the block candidate. + * It then sends requests to the ValidatorManager to fetch blocks and shard states. + */ void ValidateQuery::start_up() { - LOG(INFO) << "validate query for " << block_candidate.id.to_str() << " started"; + LOG(WARNING) << "validate query for " << block_candidate.id.to_str() << " started"; LOG(DEBUG) << "full_collated_data is " << full_collated_data_; alarm_timestamp() = timeout; rand_seed_.set_zero(); @@ -314,7 +408,16 @@ void ValidateQuery::start_up() { CHECK(pending); } -// unpack block candidate, and check root hash and file hash +/** + * Unpacks and validates a block candidate. + * + * This function unpacks the block candidate data and performs various validation checks to ensure its integrity. + * It checks the file hash and root hash of the block candidate against the expected values. + * It then parses the block header and checks its validity. + * Finally, it deserializes the collated data and extracts the collated roots. + * + * @returns True if the block candidate was successfully unpacked, false otherwise. + */ bool ValidateQuery::unpack_block_candidate() { vm::BagOfCells boc1, boc2; // 1. deserialize block itself @@ -366,6 +469,11 @@ bool ValidateQuery::unpack_block_candidate() { return extract_collated_data(); } +/** + * Initializes the validation by parsing and checking the block header. + * + * @returns True if the initialization is successful, false otherwise. + */ bool ValidateQuery::init_parse() { CHECK(block_root_.not_null()); std::vector prev_blks; @@ -493,6 +601,14 @@ bool ValidateQuery::init_parse() { return true; } +/** + * Extracts collated data from a cell. + * + * @param croot The root cell containing the collated data. + * @param idx The index of the root. + * + * @returns True if the extraction is successful, false otherwise. + */ bool ValidateQuery::extract_collated_data_from(Ref croot, int idx) { bool is_special = false; auto cs = vm::load_cell_slice_special(croot, is_special); @@ -530,7 +646,11 @@ bool ValidateQuery::extract_collated_data_from(Ref croot, int idx) { return true; } -// processes further and sorts data in collated_roots_ +/** + * Extracts collated data from a list of collated roots. + * + * @returns True if the extraction is successful, False otherwise.
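A minimal sketch (hypothetical types, not ValidateQuery itself) of the convention behind the reject_query()/fatal_error() helpers documented above: each one logs and returns false, so a validation step can report a problem and abort a chain of boolean checks in a single statement.

#include <iostream>
#include <string>

struct MiniValidator {
  bool reject(const std::string& msg) {
    std::cerr << "REJECT: " << msg << '\n';
    return false;  // lets callers write `return reject(...)`
  }
  // Example check: the new seqno must follow the previous one.
  bool check_seqno(unsigned seqno, unsigned prev_seqno) {
    return seqno == prev_seqno + 1 || reject("new block seqno must be prev seqno + 1");
  }
};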
+ */ bool ValidateQuery::extract_collated_data() { int i = -1; for (auto croot : collated_roots_) { @@ -549,8 +669,13 @@ bool ValidateQuery::extract_collated_data() { return true; } +/** + * Callback function called after retrieving the latest masterchain state. + * + * @param res The result of the retrieval of the latest masterchain state. + */ void ValidateQuery::after_get_latest_mc_state(td::Result, BlockIdExt>> res) { - LOG(DEBUG) << "in ValidateQuery::after_get_latest_mc_state()"; + LOG(WARNING) << "in ValidateQuery::after_get_latest_mc_state()"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -585,8 +710,13 @@ void ValidateQuery::after_get_latest_mc_state(td::Result> res) { - LOG(DEBUG) << "in ValidateQuery::after_get_mc_state() for " << mc_blkid_.to_str(); + LOG(WARNING) << "in ValidateQuery::after_get_mc_state() for " << mc_blkid_.to_str(); --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -603,6 +733,11 @@ void ValidateQuery::after_get_mc_state(td::Result> res) { } } +/** + * Callback function for handling the result of retrieving a masterchain block handle referenced in the block. + * + * @param res The result of retrieving the masterchain block handle. + */ void ValidateQuery::got_mc_handle(td::Result res) { LOG(DEBUG) << "in ValidateQuery::got_mc_handle() for " << mc_blkid_.to_str(); --pending; @@ -618,8 +753,14 @@ void ValidateQuery::got_mc_handle(td::Result res) { } } +/** + * Callback function called after retrieving the shard state for a previous block. + * + * @param idx The index of the previous block (0 or 1). + * @param res The result of the shard state retrieval. + */ void ValidateQuery::after_get_shard_state(int idx, td::Result> res) { - LOG(DEBUG) << "in ValidateQuery::after_get_shard_state(" << idx << ")"; + LOG(WARNING) << "in ValidateQuery::after_get_shard_state(" << idx << ")"; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -650,6 +791,13 @@ void ValidateQuery::after_get_shard_state(int idx, td::Result> r } } +/** + * Processes the retreived masterchain state. + * + * @param mc_state The reference to the masterchain state. + * + * @returns True if the masterchain state is successfully processed, false otherwise. + */ bool ValidateQuery::process_mc_state(Ref mc_state) { if (mc_state.is_null()) { return fatal_error("could not obtain reference masterchain state "s + mc_blkid_.to_str()); @@ -671,6 +819,11 @@ bool ValidateQuery::process_mc_state(Ref mc_state) { return register_mc_state(mc_state_); } +/** + * Tries to unpack the masterchain state and perform necessary checks. + * + * @returns True if the unpacking and checks are successful, false otherwise. + */ bool ValidateQuery::try_unpack_mc_state() { LOG(DEBUG) << "unpacking reference masterchain state"; auto guard = error_ctx_add_guard("unpack last mc state"); @@ -757,7 +910,12 @@ bool ValidateQuery::try_unpack_mc_state() { return true; } -// almost the same as in Collator +/** + * Fetches and validates configuration parameters from the masterchain configuration. + * Almost the same as in Collator. + * + * @returns True if all configuration parameters were successfully fetched and validated, false otherwise. 
+ */ bool ValidateQuery::fetch_config_params() { old_mparams_ = config_->get_config_param(9); { @@ -789,6 +947,12 @@ bool ValidateQuery::fetch_config_params() { storage_phase_cfg_.delete_due_limit)) { return fatal_error("cannot unpack current gas prices and limits from masterchain configuration"); } + auto mc_gas_prices = config_->get_gas_limits_prices(true); + if (mc_gas_prices.is_error()) { + return fatal_error(mc_gas_prices.move_as_error_prefix("cannot unpack masterchain gas prices and limits: ")); + } + compute_phase_cfg_.mc_gas_prices = mc_gas_prices.move_as_ok(); + compute_phase_cfg_.special_gas_full = config_->get_global_version() >= 5; storage_phase_cfg_.enable_due_payment = config_->get_global_version() >= 4; compute_phase_cfg_.block_rand_seed = rand_seed_; compute_phase_cfg_.libraries = std::make_unique(config_->get_libraries_root(), 256); @@ -803,7 +967,11 @@ bool ValidateQuery::fetch_config_params() { } compute_phase_cfg_.prev_blocks_info = prev_blocks_info.move_as_ok(); } + if (compute_phase_cfg_.global_version >= 6) { + compute_phase_cfg_.unpacked_config_tuple = config_->get_unpacked_config_tuple(now_); + } compute_phase_cfg_.suspended_addresses = config_->get_suspended_addresses(now_); + compute_phase_cfg_.size_limits = size_limits; } { // compute action_phase_cfg @@ -846,7 +1014,16 @@ bool ValidateQuery::fetch_config_params() { return true; } -// almost the same as in Collator +/** + * Checks the previous block against the block registered in the masterchain. + * Almost the same as in Collator. + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * @param chk_chain_len Flag indicating whether to check the chain length. + * + * @returns True if the previous block is valid, false otherwise. + */ bool ValidateQuery::check_prev_block(const BlockIdExt& listed, const BlockIdExt& prev, bool chk_chain_len) { if (listed.seqno() > prev.seqno()) { return reject_query(PSTRING() << "cannot generate a shardchain block after previous block " << prev.to_str() @@ -866,7 +1043,15 @@ bool ValidateQuery::check_prev_block(const BlockIdExt& listed, const BlockIdExt& return true; } -// almost the same as in Collator +/** + * Checks the previous block against the block registered in the masterchain. + * Almost the same as in Collator + * + * @param listed The BlockIdExt of the top block of this shard registered in the masterchain. + * @param prev The BlockIdExt of the previous block. + * + * @returns True if the previous block is equal to the one registered in the masterchain, false otherwise. + */ bool ValidateQuery::check_prev_block_exact(const BlockIdExt& listed, const BlockIdExt& prev) { if (listed != prev) { return reject_query(PSTRING() << "cannot generate shardchain block for shard " << shard_.to_str() @@ -877,8 +1062,12 @@ bool ValidateQuery::check_prev_block_exact(const BlockIdExt& listed, const Block return true; } -// almost the same as in Collator -// (main change: fatal_error -> reject_query) +/** + * Checks the validity of the shard configuration of the current shard. + * Almost the same as in Collator (main change: fatal_error -> reject_query). + * + * @returns True if the shard's configuration is valid, False otherwise. + */ bool ValidateQuery::check_this_shard_mc_info() { wc_info_ = config_->get_workchain_info(workchain()); if (wc_info_.is_null()) { @@ -1023,6 +1212,11 @@ bool ValidateQuery::check_this_shard_mc_info() { * */ +/** + * Computes the previous shard state. 
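A sketch of the version gating introduced in fetch_config_params() above (the struct and function names are illustrative assumptions): newer global config versions progressively enable due-payment storage fees, full special gas accounting, and the unpacked config tuple for the compute phase.

// Illustrative only: mirrors the `global_version >= N` checks above.
struct PhaseFeatureFlags {
  bool enable_due_payment = false;     // since global version 4
  bool special_gas_full = false;       // since global version 5
  bool unpacked_config_tuple = false;  // since global version 6
};

PhaseFeatureFlags features_for_version(int global_version) {
  PhaseFeatureFlags f;
  f.enable_due_payment = global_version >= 4;
  f.special_gas_full = global_version >= 5;
  f.unpacked_config_tuple = global_version >= 6;
  return f;
}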
+ * + * @returns True if the previous state is computed successfully, false otherwise. + */ bool ValidateQuery::compute_prev_state() { if (!is_masterchain() && full_collated_data_) { return compute_prev_state_from_collated_data(); @@ -1095,6 +1289,9 @@ bool ValidateQuery::compute_prev_state_from_collated_data() { return true; } +/** + * Computes the next shard state using the previous state and the block's Merkle update. + */ bool ValidateQuery::compute_next_state() { LOG(DEBUG) << "computing next state"; auto res = vm::MerkleUpdate::validate(state_update_); @@ -1170,7 +1367,13 @@ bool ValidateQuery::compute_next_state() { return true; } -// similar to Collator::unpack_merge_last_state() +/** + * Unpacks and merges the states of two previous blocks. + * Used if the block is after_merge. + * Similar to Collator::unpack_merge_last_state() + * + * @returns True if the unpacking and merging was successful, false otherwise. + */ bool ValidateQuery::unpack_merge_prev_state() { LOG(DEBUG) << "unpack/merge previous states"; CHECK(prev_states.size() == 2); @@ -1198,7 +1401,13 @@ bool ValidateQuery::unpack_merge_prev_state() { return true; } -// similar to Collator::unpack_last_state() +/** + * Unpacks the state of the previous block. + * Used if the block is not after_merge. + * Similar to Collator::unpack_last_state() + * + * @returns True if the unpacking is successful, false otherwise. + */ bool ValidateQuery::unpack_prev_state() { LOG(DEBUG) << "unpacking previous state(s)"; CHECK(prev_state_root_.not_null()); @@ -1213,7 +1422,16 @@ bool ValidateQuery::unpack_prev_state() { return unpack_one_prev_state(ps_, prev_blocks.at(0), prev_state_root_) && (!after_split_ || split_prev_state(ps_)); } -// similar to Collator::unpack_one_last_state() +/** + * Unpacks the state of a previous block and performs necessary checks. + * Similar to Collator::unpack_one_last_state() + * + * @param ss The ShardState object to unpack the state into. + * @param blkid The BlockIdExt of the previous block. + * @param prev_state_root The root of the state. + * + * @returns True if the unpacking and checks are successful, false otherwise. + */ bool ValidateQuery::unpack_one_prev_state(block::ShardState& ss, BlockIdExt blkid, Ref prev_state_root) { auto res = ss.unpack_state_ext(blkid, std::move(prev_state_root), global_id_, mc_seqno_, after_split_, after_split_ | after_merge_, [this](ton::BlockSeqno mc_seqno) { @@ -1230,7 +1448,15 @@ bool ValidateQuery::unpack_one_prev_state(block::ShardState& ss, BlockIdExt blki return true; } -// similar to Collator::split_last_state() +/** + * Splits the state of previous block. + * Used if the block is after_split. + * Similar to Collator::split_last_state() + * + * @param ss The ShardState object representing the previous state. The result is stored here. + * + * @returns True if the split operation is successful, false otherwise. + */ bool ValidateQuery::split_prev_state(block::ShardState& ss) { LOG(INFO) << "Splitting previous state " << ss.id_.to_str() << " to subshard " << shard_.to_str(); CHECK(after_split_); @@ -1252,6 +1478,11 @@ bool ValidateQuery::split_prev_state(block::ShardState& ss) { return true; } +/** + * Unpacks the next state (obtained by applying the Merkle update) and performs checks. + * + * @returns True if the next state is successfully unpacked and passes all checks, false otherwise. 
+ */ bool ValidateQuery::unpack_next_state() { LOG(DEBUG) << "unpacking new state"; CHECK(state_root_.not_null()); @@ -1281,7 +1512,12 @@ bool ValidateQuery::unpack_next_state() { return true; } -// almost the same as in Collator +/** + * Requests the message queues of neighboring shards. + * Almost the same as in Collator. + * + * @returns True if the request for neighbor message queues was successful, false otherwise. + */ bool ValidateQuery::request_neighbor_queues() { CHECK(new_shard_conf_); auto neighbor_list = new_shard_conf_->get_neighbor_shard_hash_ids(shard_); @@ -1347,9 +1583,14 @@ bool ValidateQuery::request_neighbor_queues() { return true; } -// almost the same as in Collator +/** + * Handles the result of obtaining the outbound queue for a neighbor. + * Almost the same as in Collator. + * + * @param i The index of the neighbor. + * @param res The obtained outbound queue. + */ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> res) { - LOG(DEBUG) << "obtained outbound queue for neighbor #" << i; --pending; if (res.is_error()) { fatal_error(res.move_as_error()); @@ -1357,6 +1598,7 @@ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> } Ref outq_descr = res.move_as_ok(); block::McShardDescr& descr = neighbors_.at(i); + LOG(WARNING) << "obtained outbound queue for neighbor #" << i << " : " << descr.shard().to_str(); if (outq_descr->get_block_id() != descr.blk_) { LOG(DEBUG) << "outq_descr->id = " << outq_descr->get_block_id().to_str() << " ; descr.id = " << descr.blk_.to_str(); fatal_error( @@ -1409,7 +1651,14 @@ void ValidateQuery::got_neighbor_out_queue(int i, td::Result> } } -// almost the same as in Collator +/** + * Registers a masterchain state. + * Almost the same as in Collator. + * + * @param other_mc_state The masterchain state to register. + * + * @returns True if the registration is successful, false otherwise. + */ bool ValidateQuery::register_mc_state(Ref other_mc_state) { if (other_mc_state.is_null() || mc_state_.is_null()) { return false; @@ -1435,7 +1684,15 @@ bool ValidateQuery::register_mc_state(Ref other_mc_state) { return true; } -// almost the same as in Collator +/** + * Requests the auxiliary masterchain state. + * Almost the same as in Collator + * + * @param seqno The seqno of the block. + * @param state A reference to the auxiliary masterchain state. + * + * @returns True if the auxiliary masterchain state is successfully requested, false otherwise. + */ bool ValidateQuery::request_aux_mc_state(BlockSeqno seqno, Ref& state) { if (mc_state_.is_null()) { return fatal_error(PSTRING() << "cannot find masterchain block with seqno " << seqno @@ -1469,7 +1726,14 @@ bool ValidateQuery::request_aux_mc_state(BlockSeqno seqno, Ref ValidateQuery::get_aux_mc_state(BlockSeqno seqno) const { auto it = aux_mc_states_.find(seqno); if (it != aux_mc_states_.end()) { @@ -1479,7 +1743,14 @@ Ref ValidateQuery::get_aux_mc_state(BlockSeqno seqno) const { } } -// almost the same as in Collator +/** + * Callback function called after retrieving the auxiliary shard state. + * Handles the retrieved shard state and performs necessary checks and registrations. + * Almost the same as in Collator. + * + * @param blkid The BlockIdExt of the shard state. + * @param res The result of retrieving the shard state. 
+ */ void ValidateQuery::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result> res) { LOG(DEBUG) << "in ValidateQuery::after_get_aux_shard_state(" << blkid.to_str() << ")"; --pending; @@ -1506,6 +1777,17 @@ void ValidateQuery::after_get_aux_shard_state(ton::BlockIdExt blkid, td::Result< } // similar to Collator::update_one_shard() +/** + * Checks one shard description in the masterchain shard configuration. + * Used in masterchain validation. + * + * @param info The shard information to be updated. + * @param sibling The sibling shard information. + * @param wc_info The workchain information. + * @param ccvc The Catchain validators configuration. + * + * @returns True if the validation was successful, false otherwise. + */ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block::McShardHash* sibling, const block::WorkchainInfo* wc_info, const block::CatchainValidatorsConfig& ccvc) { auto shard = info.shard(); @@ -1792,8 +2074,14 @@ bool ValidateQuery::check_one_shard(const block::McShardHash& info, const block: return true; } -// checks old_shard_conf_ -> new_shard_conf_ transition using top_shard_descr_dict_ from collated data -// similar to Collator::update_shard_config() +/** + * Checks the shard configuration in the masterchain. + * Used in masterchain validation. + * Checks old_shard_conf_ -> new_shard_conf_ transition using top_shard_descr_dict_ from collated data. + * Similar to Collator::update_shard_config() + * + * @returns True if the shard layout is valid, false otherwise. + */ bool ValidateQuery::check_shard_layout() { prev_now_ = config_->utime; if (prev_now_ > now_) { @@ -1847,7 +2135,14 @@ bool ValidateQuery::check_shard_layout() { return check_mc_validator_info(is_key_block_ || (now_ / ccvc.mc_cc_lifetime > prev_now_ / ccvc.mc_cc_lifetime)); } -// similar to Collator::register_shard_block_creators +/** + * Registers the shard block creators to block_create_count_ + * Similar to Collator::register_shard_block_creators + * + * @param creator_list A vector of Bits256 representing the shard block creators. + * + * @returns True if the registration was successful, False otherwise. + */ bool ValidateQuery::register_shard_block_creators(std::vector creator_list) { for (const auto& x : creator_list) { LOG(DEBUG) << "registering block creator " << x.to_hex(); @@ -1862,7 +2157,12 @@ bool ValidateQuery::register_shard_block_creators(std::vector creat return true; } -// similar to Collator::check_cur_validator_set() +/** + * Checks that the current validator set is entitled to create blocks in this shard and has a correct catchain seqno. + * Similar to Collator::check_cur_validator_set() + * + * @returns True if the current validator set is valid, false otherwise. + */ bool ValidateQuery::check_cur_validator_set() { CatchainSeqno cc_seqno = 0; auto nodes = config_->compute_validator_set_cc(shard_, now_, &cc_seqno); @@ -1885,8 +2185,14 @@ bool ValidateQuery::check_cur_validator_set() { return true; } -// parallel to 4. of Collator::create_mc_state_extra() -// checks validator_info in mc_state_extra +/** + * Checks validator_info in mc_state_extra. + * NB: could be run in parallel to 4. of Collator::create_mc_state_extra() + * + * @param update_mc_cc Flag indicating whether the masterchain catchain seqno should be updated. + * + * @returns True if the validator information is valid, false otherwise.
+ */ bool ValidateQuery::check_mc_validator_info(bool update_mc_cc) { block::gen::McStateExtra::Record old_state_extra; block::gen::ValidatorInfo::Record old_val_info; @@ -1928,6 +2234,11 @@ bool ValidateQuery::check_mc_validator_info(bool update_mc_cc) { return true; } +/** + * Checks if the Unix time and logical time of the block are valid. + * + * @returns True if the utime and logical time pass checks, False otherwise. + */ bool ValidateQuery::check_utime_lt() { if (start_lt_ <= ps_.lt_) { return reject_query(PSTRING() << "block has start_lt " << start_lt_ << " less than or equal to lt " << ps_.lt_ @@ -1974,8 +2285,16 @@ bool ValidateQuery::check_utime_lt() { * */ -// almost the same as in Collator -// (but it can take into account the new state of the masterchain) +/** + * Adjusts one entry from the processed up to information using the masterchain state that is referenced in the entry. + * Almost the same as in Collator (but it can take into account the new state of the masterchain). + * + * @param proc The MsgProcessedUpto object. + * @param owner The shard that the MsgProcessedUpto information is taken from. + * @param allow_cur Allow using the new state of the masterchain. + * + * @returns True if the processed up to information was successfully adjusted, false otherwise. + */ bool ValidateQuery::fix_one_processed_upto(block::MsgProcessedUpto& proc, ton::ShardIdFull owner, bool allow_cur) { if (proc.compute_shard_end_lt) { return true; @@ -1998,7 +2317,15 @@ bool ValidateQuery::fix_one_processed_upto(block::MsgProcessedUpto& proc, ton::S return (bool)proc.compute_shard_end_lt; } -// almost the same as in Collator +/** + * Adjusts the processed up to collection using the auxiliary masterchain states. + * Almost the same as in Collator. + * + * @param upto The MsgProcessedUptoCollection to be adjusted. + * @param allow_cur Allow using the new state of the masterchain. + * + * @returns True if all entries were successfully adjusted, False otherwise. + */ bool ValidateQuery::fix_processed_upto(block::MsgProcessedUptoCollection& upto, bool allow_cur) { for (auto& entry : upto.list) { if (!fix_one_processed_upto(entry, upto.owner, allow_cur)) { @@ -2008,6 +2335,11 @@ bool ValidateQuery::fix_processed_upto(block::MsgProcessedUptoCollection& upto, return true; } +/** + * Adjusts the processed_upto values for all shard states, including neighbors. + * + * @returns True if all processed_upto values were successfully adjusted, false otherwise. + */ bool ValidateQuery::fix_all_processed_upto() { CHECK(ps_.processed_upto_); if (!fix_processed_upto(*ps_.processed_upto_)) { @@ -2028,7 +2360,13 @@ bool ValidateQuery::fix_all_processed_upto() { return true; } -// almost the same as in Collator +/** + * Adds trivial neighbors after merging two shards. + * Trivial neighbors are the two previous blocks. + * Almost the same as in Collator. + * + * @returns True if the operation is successful, false otherwise. + */ bool ValidateQuery::add_trivial_neighbor_after_merge() { LOG(DEBUG) << "in add_trivial_neighbor_after_merge()"; CHECK(prev_blocks.size() == 2); @@ -2063,7 +2401,13 @@ bool ValidateQuery::add_trivial_neighbor_after_merge() { return true; } -// almost the same as in Collator +/** + * Adds a trivial neighbor. + * A trivial neighbor is the previous block. + * Almost the same as in Collator. + * + * @returns True if the operation is successful, false otherwise.
+ */ bool ValidateQuery::add_trivial_neighbor() { LOG(DEBUG) << "in add_trivial_neighbor()"; if (after_merge_) { @@ -2199,6 +2543,11 @@ bool ValidateQuery::add_trivial_neighbor() { return true; } +/** + * Unpacks block data and performs validation checks. + * + * @returns True if the block data is successfully unpacked and passes all validation checks, false otherwise. + */ bool ValidateQuery::unpack_block_data() { LOG(DEBUG) << "unpacking block structures"; block::gen::Block::Record blk; @@ -2238,6 +2587,13 @@ bool ValidateQuery::unpack_block_data() { return unpack_precheck_value_flow(std::move(blk.value_flow)); } +/** + * Validates and unpacks the value flow of a new block. + * + * @param value_flow_root The root of the value flow to be unpacked and validated. + * + * @returns True if the value flow is valid and unpacked successfully, false otherwise. + */ bool ValidateQuery::unpack_precheck_value_flow(Ref value_flow_root) { vm::CellSlice cs{vm::NoVmOrd(), value_flow_root}; if (!(cs.is_valid() && value_flow_.fetch(cs) && cs.empty_ext())) { @@ -2355,7 +2711,14 @@ bool ValidateQuery::unpack_precheck_value_flow(Ref value_flow_root) { return true; } -// similar to Collator::compute_minted_amount() +/** + * Computes the amount of extra currencies to be minted. + * Similar to Collator::compute_minted_amount() + * + * @param to_mint A reference to the CurrencyCollection object to store the minted amount. + * + * @returns True if the computation is successful, false otherwise. + */ bool ValidateQuery::compute_minted_amount(block::CurrencyCollection& to_mint) { if (!is_masterchain()) { return to_mint.set_zero(); @@ -2407,6 +2770,15 @@ bool ValidateQuery::compute_minted_amount(block::CurrencyCollection& to_mint) { return true; } +/** + * Pre-validates the update of an account in a query. + * + * @param acc_id The 256-bit account address. + * @param old_value The old value of the account serialized as ShardAccount. Can be null. + * @param new_value The new value of the account serialized as ShardAccount. Can be null. + * + * @returns True if the accounts passes preliminary checks, false otherwise. + */ bool ValidateQuery::precheck_one_account_update(td::ConstBitPtr acc_id, Ref old_value, Ref new_value) { LOG(DEBUG) << "checking update of account " << acc_id.to_hex(256); @@ -2466,6 +2838,11 @@ bool ValidateQuery::precheck_one_account_update(td::ConstBitPtr acc_id, Ref trans_csr, ton::Bits256& prev_trans_hash, ton::LogicalTime& prev_trans_lt, unsigned& prev_trans_lt_len, @@ -2553,6 +2943,14 @@ bool ValidateQuery::precheck_one_transaction(td::ConstBitPtr acc_id, ton::Logica } // NB: could be run in parallel for different accounts +/** + * Pre-validates an AccountBlock and all transactions in it. + * + * @param acc_id The 256-bit account address. + * @param acc_blk_root The root of the AccountBlock. + * + * @returns True if the AccountBlock passes pre-checks, false otherwise. 
+ */ bool ValidateQuery::precheck_one_account_block(td::ConstBitPtr acc_id, Ref acc_blk_root) { LOG(DEBUG) << "checking AccountBlock for " << acc_id.to_hex(256); if (!acc_id.equals(shard_pfx_.bits(), shard_pfx_len_)) { @@ -2634,6 +3032,11 @@ bool ValidateQuery::precheck_one_account_block(td::ConstBitPtr acc_id, Ref ValidateQuery::lookup_transaction(const ton::StdSmcAddress& addr, ton::LogicalTime lt) const { CHECK(account_blocks_dict_); block::gen::AccountBlock::Record ab_rec; @@ -2664,7 +3075,13 @@ Ref ValidateQuery::lookup_transaction(const ton::StdSmcAddress& addr, return trans_dict.lookup_ref(td::BitArray<64>{(long long)lt}); } -// checks that a ^Transaction refers to a transaction present in the ShardAccountBlocks +/** + * Checks that a Transaction cell refers to a transaction present in the ShardAccountBlocks. + * + * @param trans_ref The reference to the serialized transaction root. + * + * @returns True if the transaction reference is valid, False otherwise. + */ bool ValidateQuery::is_valid_transaction_ref(Ref trans_ref) const { ton::StdSmcAddress addr; ton::LogicalTime lt; @@ -2684,8 +3101,16 @@ bool ValidateQuery::is_valid_transaction_ref(Ref trans_ref) const { return true; } -// checks that any change in OutMsgQueue in the state is accompanied by an OutMsgDescr record in the block -// also checks that the keys are correct +/** + * Checks that any change in OutMsgQueue in the state is accompanied by an OutMsgDescr record in the block. + * Also checks that the keys are correct. + * + * @param out_msg_id The 32+64+256-bit ID of the outbound message. + * @param old_value The old value of the message queue entry. + * @param new_value The new value of the message queue entry. + * + * @returns True if the update is valid, false otherwise. + */ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id, Ref old_value, Ref new_value) { LOG(DEBUG) << "checking update of enqueued outbound message " << out_msg_id.get_int(32) << ":" @@ -2851,6 +3276,11 @@ bool ValidateQuery::precheck_one_message_queue_update(td::ConstBitPtr out_msg_id return true; } +/** + * Performs a pre-check on the difference between the old and new outbound message queues. + * + * @returns True if the pre-check is successful, false otherwise. + */ bool ValidateQuery::precheck_message_queue_update() { LOG(INFO) << "pre-checking the difference between the old and the new outbound message queues"; try { @@ -2873,6 +3303,14 @@ bool ValidateQuery::precheck_message_queue_update() { return true; } +/** + * Updates the maximum processed logical time and hash value. + * + * @param lt The logical time to compare against the current maximum processed logical time. + * @param hash The hash value to compare against the current maximum processed hash value. + * + * @returns True if the update was successful, false otherwise. + */ bool ValidateQuery::update_max_processed_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash) { if (proc_lt_ < lt || (proc_lt_ == lt && proc_hash_ < hash)) { proc_lt_ = lt; @@ -2881,6 +3319,14 @@ bool ValidateQuery::update_max_processed_lt_hash(ton::LogicalTime lt, const ton: return true; } +/** + * Updates the minimum enqueued logical time and hash values. + * + * @param lt The logical time to compare. + * @param hash The hash value to compare. + * + * @returns True if the update was successful, false otherwise. 
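A standalone sketch (assumed types) of the lexicographic (logical time, hash) ordering used by update_max_processed_lt_hash() and update_min_enqueued_lt_hash() above: logical time is compared first and the 256-bit hash breaks ties.

#include <array>
#include <cstdint>

using Hash256 = std::array<unsigned char, 32>;

struct LtHash {
  std::uint64_t lt = 0;
  Hash256 hash{};
};

// True if a precedes b in the (lt, hash) order used for processed/enqueued message tracking.
bool lt_hash_less(const LtHash& a, const LtHash& b) {
  return a.lt < b.lt || (a.lt == b.lt && a.hash < b.hash);
}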
+ */ bool ValidateQuery::update_min_enqueued_lt_hash(ton::LogicalTime lt, const ton::Bits256& hash) { if (lt < min_enq_lt_ || (lt == min_enq_lt_ && hash < min_enq_hash_)) { min_enq_lt_ = lt; @@ -2889,7 +3335,13 @@ bool ValidateQuery::update_min_enqueued_lt_hash(ton::LogicalTime lt, const ton:: return true; } -// check that the enveloped message (MsgEnvelope) was present in the output queue of a neighbor, and that it has not been processed before +/** + * Checks that the MsgEnvelope was present in the output queue of a neighbor, and that it has not been processed before. + * + * @param msg_env The message envelope of the imported message. + * + * @returns True if the imported internal message passes checks, false otherwise. + */ bool ValidateQuery::check_imported_message(Ref msg_env) { block::tlb::MsgEnvelope::Record_std env; block::gen::CommonMsgInfo::Record_int_msg_info info; @@ -2949,11 +3401,27 @@ bool ValidateQuery::check_imported_message(Ref msg_env) { " has previous address not belonging to any neighbor"); } +/** + * Checks if the given input message is a special message. + * A message is considered special if it recovers fees or mints extra currencies. + * + * @param in_msg The input message to be checked. + * + * @returns True if the input message is special, False otherwise. + */ bool ValidateQuery::is_special_in_msg(const vm::CellSlice& in_msg) const { return (recover_create_msg_.not_null() && vm::load_cell_slice(recover_create_msg_).contents_equal(in_msg)) || (mint_msg_.not_null() && vm::load_cell_slice(mint_msg_).contents_equal(in_msg)); } +/** + * Checks the validity of an inbound message listed in InMsgDescr. + * + * @param key The 256-bit key of the inbound message. + * @param in_msg The inbound message to be checked serialized using InMsg TLB-scheme. + * + * @returns True if the inbound message is valid, false otherwise. + */ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) { LOG(DEBUG) << "checking InMsg with key " << key.to_hex(256); CHECK(in_msg.not_null()); @@ -3360,6 +3828,11 @@ bool ValidateQuery::check_in_msg(td::ConstBitPtr key, Ref in_msg) return true; } +/** + * Checks the validity of the inbound messages listed in the InMsgDescr dictionary. + * + * @returns True if the inbound messages dictionary is valid, false otherwise. + */ bool ValidateQuery::check_in_msg_descr() { LOG(INFO) << "checking inbound messages listed in InMsgDescr"; try { @@ -3379,6 +3852,14 @@ bool ValidateQuery::check_in_msg_descr() { return true; } +/** + * Checks the validity of an outbound message listed in OutMsgDescr. + * + * @param key The 256-bit key of the outbound message. + * @param in_msg The outbound message to be checked serialized using OutMsg TLB-scheme. + * + * @returns True if the outbound message is valid, false otherwise. + */ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_msg) { LOG(DEBUG) << "checking OutMsg with key " << key.to_hex(256); CHECK(out_msg.not_null()); @@ -3896,6 +4377,11 @@ bool ValidateQuery::check_out_msg(td::ConstBitPtr key, Ref out_ms return true; } +/** + * Checks the validity of the outbound messages listed in the OutMsgDescr dictionary. + * + * @returns True if the outbound messages dictionary is valid, false otherwise. 
+ */ bool ValidateQuery::check_out_msg_descr() { LOG(INFO) << "checking outbound messages listed in OutMsgDescr"; try { @@ -3914,7 +4400,12 @@ bool ValidateQuery::check_out_msg_descr() { return true; } -// compare to Collator::update_processed_upto() +/** + * Checks if the processed up to information is valid and consistent. + * Compare to Collator::update_processed_upto() + * + * @returns True if the processed up to information is valid and consistent, false otherwise. + */ bool ValidateQuery::check_processed_upto() { LOG(INFO) << "checking ProcessedInfo"; CHECK(ps_.processed_upto_); @@ -3971,7 +4462,18 @@ bool ValidateQuery::check_processed_upto() { return true; } -// similar to Collator::process_inbound_message +/** + * Checks the validity of an outbound message in the neighbor's queue. + * Similar to Collator::process_inbound_message. + * + * @param enq_msg The enqueued message to validate. + * @param lt The logical time of the message. + * @param key The 32+64+256-bit key of the message. + * @param nb The neighbor's description. + * @param unprocessed A boolean flag that will be set to true if the message is unprocessed, false otherwise. + * + * @returns True if the message is valid, false otherwise. + */ bool ValidateQuery::check_neighbor_outbound_message(Ref enq_msg, ton::LogicalTime lt, td::ConstBitPtr key, const block::McShardDescr& nb, bool& unprocessed, bool& processed_here, td::Bits256& msg_hash) { @@ -4104,6 +4606,11 @@ bool ValidateQuery::check_neighbor_outbound_message(Ref enq_msg, return true; } +/** + * Checks messages from the outbound queues of the neighbors. + * + * @returns True if the messages are valid, false otherwise. + */ bool ValidateQuery::check_in_queue() { int imported_messages_count = 0; in_msg_dict_->check_for_each_extra([&](Ref value, Ref, td::ConstBitPtr, int) { @@ -4162,9 +4669,12 @@ bool ValidateQuery::check_in_queue() { return true; } -// checks that all messages imported from our outbound queue into neighbor shards have been dequeued -// similar to Collator::out_msg_queue_cleanup() -// (but scans new outbound queue instead of the old) +/** + * Checks that all messages imported from our outbound queue into neighbor shards have been dequeued + * Similar to Collator::out_msg_queue_cleanup() (but scans the new outbound queue instead of the old). + * + * @returns True if the delivery status of all messages has been checked successfully, false otherwise. + */ bool ValidateQuery::check_delivered_dequeued() { LOG(INFO) << "scanning new outbound queue and checking delivery status of all messages"; bool ok = false; @@ -4211,26 +4721,42 @@ bool ValidateQuery::check_delivered_dequeued() { }) || ok; } -// similar to Collator::make_account_from() -std::unique_ptr ValidateQuery::make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra) { +/** + * Creates a new Account object from the given address and serialized account data. + * Creates a new Account if not found. + * Similar to Collator::make_account_from() + * + * @param addr A pointer to the 256-bit address of the account. + * @param account A cell slice with an account serialized using ShardAccount TLB-scheme. + * + * @returns A unique pointer to the created Account object, or nullptr if the creation failed. 
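+ *
+ * The body below follows a create-or-unpack pattern: when no serialized ShardAccount is given,
+ * a fresh empty account is initialized; otherwise the serialized data is unpacked, and nullptr
+ * is returned on any failure. A minimal sketch of the same shape with hypothetical types
+ * (not the actual block::Account API):
+ *
+ * @code
+ *   #include <memory>
+ *   #include <optional>
+ *   #include <string>
+ *
+ *   struct Acc {
+ *     bool init_new() { return true; }                  // start from an empty state
+ *     bool unpack(const std::string&) { return true; }  // parse a serialized state
+ *   };
+ *
+ *   std::unique_ptr<Acc> make_acc(const std::optional<std::string>& serialized) {
+ *     auto ptr = std::make_unique<Acc>();
+ *     if (!serialized) {
+ *       if (!ptr->init_new()) {
+ *         return nullptr;
+ *       }
+ *     } else if (!ptr->unpack(*serialized)) {
+ *       return nullptr;
+ *     }
+ *     return ptr;
+ *   }
+ * @endcode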
+ */ +std::unique_ptr ValidateQuery::make_account_from(td::ConstBitPtr addr, Ref account) { auto ptr = std::make_unique(workchain(), addr); if (account.is_null()) { if (!ptr->init_new(now_)) { return nullptr; } - } else if (!ptr->unpack(std::move(account), std::move(extra), now_, - is_masterchain() && config_->is_special_smartcontract(addr))) { + } else if (!ptr->unpack(std::move(account), now_, is_masterchain() && config_->is_special_smartcontract(addr))) { return nullptr; } ptr->block_lt = start_lt_; return ptr; } -// similar to Collator::make_account() +/** + * Retrieves an Account object from the data in the shard state. + * Accounts are cached in the ValidateQuery's map. + * Similar to Collator::make_account() + * + * @param addr The 256-bit address of the account. + * + * @returns Pointer to the account if found or created successfully. + * Returns nullptr if an error occurred. + */ std::unique_ptr ValidateQuery::unpack_account(td::ConstBitPtr addr) { auto dict_entry = ps_.account_dict_->lookup_extra(addr, 256); - auto new_acc = make_account_from(addr, std::move(dict_entry.first), std::move(dict_entry.second)); + auto new_acc = make_account_from(addr, std::move(dict_entry.first)); if (!new_acc) { reject_query("cannot load state of account "s + addr.to_hex(256) + " from previous shardchain state"); return {}; @@ -4243,6 +4769,18 @@ std::unique_ptr ValidateQuery::unpack_account(td::ConstBitPtr ad return new_acc; } +/** + * Checks the validity of a single transaction for a given account. + * Performs transaction execution. + * + * @param account The account of the transaction. + * @param lt The logical time of the transaction. + * @param trans_root The root of the transaction. + * @param is_first Flag indicating if this is the first transaction of the account. + * @param is_last Flag indicating if this is the last transaction of the account. + * + * @returns True if the transaction is valid, false otherwise.
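+ *
+ * Besides re-executing the transaction, the body below also accumulates the gas used by
+ * ordinary (non-special) transactions into block-level counters and rejects the block once
+ * total_gas_used exceeds gas_limit_hard + trx_gas_limit (see the gas block-limit checks in the
+ * body). A minimal accumulate-and-compare sketch with hypothetical plain integers:
+ *
+ * @code
+ *   #include <cstdint>
+ *
+ *   // Returns true while the accumulated gas stays within the allowed budget, mirroring the
+ *   // "total_gas_used > gas_limit_hard + trx_gas_limit" rejection condition below.
+ *   bool within_gas_budget(std::uint64_t& total_gas_used, std::uint64_t gas_used,
+ *                          std::uint64_t gas_limit_hard, std::uint64_t trx_gas_limit) {
+ *     total_gas_used += gas_used;
+ *     return total_gas_used <= gas_limit_hard + trx_gas_limit;
+ *   }
+ * @endcode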
+ */ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalTime lt, Ref trans_root, bool is_first, bool is_last) { if (!check_timeout()) { @@ -4258,6 +4796,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT bool external{false}, ihr_delivered{false}, need_credit_phase{false}; // check input message block::CurrencyCollection money_imported(0), money_exported(0); + bool is_special_tx = false; // recover/mint transaction if (in_msg_root.not_null()) { auto in_descr_cs = in_msg_dict_->lookup(in_msg_root->get_hash().as_bitslice()); if (in_descr_cs.is_null()) { @@ -4273,6 +4812,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " has an invalid InMsg record (not one of msg_import_ext, msg_import_fin, " "msg_import_imm or msg_import_ihr)"); } + is_special_tx = is_special_in_msg(*in_descr_cs); // once we know there is a InMsg with correct hash, we already know that it contains a message with this hash (by the verification of InMsg), so it is our message // have still to check its destination address and imported value // and that it refers to this transaction @@ -4290,7 +4830,7 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT << " processed inbound message created later at logical time " << info.created_lt); } - if (info.created_lt != start_lt_ || !is_special_in_msg(*in_descr_cs)) { + if (info.created_lt != start_lt_ || !is_special_tx) { msg_proc_lt_.emplace_back(addr, lt, info.created_lt); } dest = std::move(info.dest); @@ -4483,13 +5023,6 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT int trans_type = block::transaction::Transaction::tr_none; switch (tag) { case block::gen::TransactionDescr::trans_ord: { - if (!block_limit_status_->fits(block::ParamLimits::cl_medium)) { - return reject_query(PSTRING() << "cannod add ordinary transaction because hard block limits are exceeded: " - << "gas_used=" << block_limit_status_->gas_used - << "(limit=" << block_limits_->gas.hard() << "), " - << "lt_delta=" << block_limit_status_->cur_lt - block_limits_->start_lt - << "(limit=" << block_limits_->lt_delta.hard() << ")"); - } trans_type = block::transaction::Transaction::tr_ord; if (in_msg_root.is_null()) { return reject_query(PSTRING() << "ordinary transaction " << lt << " of account " << addr.to_hex() @@ -4637,10 +5170,31 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT return reject_query(PSTRING() << "cannot re-create the serialization of transaction " << lt << " for smart contract " << addr.to_hex()); } - if (!trs->update_limits(*block_limit_status_, false)) { + if (!trs->update_limits(*block_limit_status_, /* with_gas = */ false, /* with_size = */ false)) { return fatal_error(PSTRING() << "cannot update block limit status to include transaction " << lt << " of account " << addr.to_hex()); } + + // Collator should stop if total gas usage exceeds limits, including transactions on special accounts, but without + // ticktocks and mint/recover. + // Here Validator checks a weaker condition + if (!is_special_tx && !trs->gas_limit_overridden && trans_type == block::transaction::Transaction::tr_ord) { + (account.is_special ? 
total_special_gas_used_ : total_gas_used_) += trs->gas_used(); + } + if (total_gas_used_ > block_limits_->gas.hard() + compute_phase_cfg_.gas_limit) { + return reject_query(PSTRING() << "gas block limits are exceeded: total_gas_used > gas_limit_hard + trx_gas_limit (" + << "total_gas_used=" << total_gas_used_ + << ", gas_limit_hard=" << block_limits_->gas.hard() + << ", trx_gas_limit=" << compute_phase_cfg_.gas_limit << ")"); + } + if (total_special_gas_used_ > block_limits_->gas.hard() + compute_phase_cfg_.special_gas_limit) { + return reject_query( + PSTRING() << "gas block limits are exceeded: total_special_gas_used > gas_limit_hard + special_gas_limit (" + << "total_special_gas_used=" << total_special_gas_used_ + << ", gas_limit_hard=" << block_limits_->gas.hard() + << ", special_gas_limit=" << compute_phase_cfg_.special_gas_limit << ")"); + } + auto trans_root2 = trs->commit(account); if (trans_root2.is_null()) { return reject_query(PSTRING() << "the re-created transaction " << lt << " for smart contract " << addr.to_hex() @@ -4706,7 +5260,15 @@ bool ValidateQuery::check_one_transaction(block::Account& account, ton::LogicalT return true; } -// NB: may be run in parallel for different accounts +/** + * Checks the validity of transactions for a given account block. + * NB: may be run in parallel for different accounts + * + * @param acc_addr The address of the account. + * @param acc_blk_root The root of the AccountBlock. + * + * @returns True if the account transactions are valid, false otherwise. + */ bool ValidateQuery::check_account_transactions(const StdSmcAddress& acc_addr, Ref acc_blk_root) { block::gen::AccountBlock::Record acc_blk; CHECK(tlb::csr_unpack(std::move(acc_blk_root), acc_blk) && acc_blk.account_addr == acc_addr); @@ -4738,6 +5300,11 @@ bool ValidateQuery::check_account_transactions(const StdSmcAddress& acc_addr, Re } } +/** + * Checks all transactions in the account blocks. + * + * @returns True if all transactions pass the check, False otherwise. + */ bool ValidateQuery::check_transactions() { LOG(INFO) << "checking all transactions"; return account_blocks_dict_->check_for_each_extra( @@ -4747,7 +5314,17 @@ bool ValidateQuery::check_transactions() { }); } -// similar to Collator::update_account_public_libraries() +/** + * Processes changes in libraries of an account. + * Used in masterchain validation. + * Similar to Collator::update_account_public_libraries() + * + * @param orig_libs The original libraries of the account. + * @param final_libs The final libraries of the account. + * @param addr The address of the account. + * + * @returns True if the update was successful, false otherwise. + */ bool ValidateQuery::scan_account_libraries(Ref orig_libs, Ref final_libs, const td::Bits256& addr) { vm::Dictionary dict1{std::move(orig_libs), 256}, dict2{std::move(final_libs), 256}; return dict1.scan_diff( @@ -4765,6 +5342,12 @@ bool ValidateQuery::scan_account_libraries(Ref orig_libs, Ref in_msg_root, const block::CurrencyCollection& amount, Ref addr_cell) { if (in_msg_root.is_null()) { @@ -4894,11 +5492,28 @@ bool ValidateQuery::check_special_message(Ref in_msg_root, const block return true; } +/** + * Checks if all necessary special messages are valid and exist in the incoming messages dictionary. + * Used in masterchain validation. + * + * @returns True if special messages are valid, false otherwise. 
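+ *
+ * Concretely, the body below validates the fee-recovery message (recover_create_msg_, checked
+ * against value_flow_.recovered) and the extra-currency minting message (mint_msg_, checked
+ * against value_flow_.minted); the destination address of each is taken from the configuration
+ * parameter passed to check_special_message().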
+ */ bool ValidateQuery::check_special_messages() { return check_special_message(recover_create_msg_, value_flow_.recovered, config_->get_config_param(3, 1)) && check_special_message(mint_msg_, value_flow_.minted, config_->get_config_param(2, 0)); } +/** + * Checks if an update of LibDescr of as single library update is valid. + * Compares updates in LibDescr against updates of account states. + * Used in masterchain validation. + * + * @param key The 256-bit key of the library. + * @param old_value The old value of the LibDescr + * @param new_value The new value of the LibDescr. + * + * @returns True if the library update is valid, false otherwise. + */ bool ValidateQuery::check_one_library_update(td::ConstBitPtr key, Ref old_value, Ref new_value) { // shared_lib_descr$00 lib:^Cell publishers:(Hashmap 256 True) = LibDescr; @@ -4949,6 +5564,12 @@ bool ValidateQuery::check_one_library_update(td::ConstBitPtr key, Refscan_diff( @@ -4969,6 +5590,11 @@ bool ValidateQuery::check_shard_libraries() { return true; } +/** + * Checks the validity of the new shard state. + * + * @returns True if the new state is valid, false otherwise. + */ bool ValidateQuery::check_new_state() { LOG(INFO) << "checking header of the new shardchain state"; block::gen::ShardStateUnsplit::Record info; @@ -5063,6 +5689,15 @@ bool ValidateQuery::check_new_state() { return true; } +/** + * Checks if a masterchain configuration update is valid. + * Used in masterchain validation. + * + * @param old_conf_params The old configuration parameters. + * @param new_conf_params The new configuration parameters. + * + * @returns True if the update is valid, false otherwise. + */ bool ValidateQuery::check_config_update(Ref old_conf_params, Ref new_conf_params) { if (!block::gen::t_ConfigParams.validate_csr(10000, new_conf_params)) { return reject_query("new configuration failed to pass automated validity checks"); @@ -5167,6 +5802,16 @@ bool ValidateQuery::check_config_update(Ref old_conf_params, Ref< "reason (the suggested configuration appears to be valid)"); } +/** + * Checks if a single entry in the dictionary of previous masterchain blocks is valid and consistent. + * Used in masterchain validation. + * + * @param seqno The sequence number of the entry. + * @param old_val_extra The old value of the entry. + * @param new_val_extra The new value of the entry. + * + * @returns True if the update is valid and consistent, false otherwise. + */ bool ValidateQuery::check_one_prev_dict_update(ton::BlockSeqno seqno, Ref old_val_extra, Ref new_val_extra) { if (old_val_extra.not_null() && new_val_extra.is_null()) { @@ -5221,7 +5866,13 @@ bool ValidateQuery::check_one_prev_dict_update(ton::BlockSeqno seqno, Ref old_val, Ref new_val) { LOG(DEBUG) << "checking update of CreatorStats for "s + key.to_hex(256); @@ -5473,7 +6144,13 @@ bool ValidateQuery::check_one_block_creator_update(td::ConstBitPtr key, Refget_shard_hash(shard); @@ -5541,6 +6228,12 @@ bool ValidateQuery::check_one_shard_fee(ShardIdFull shard, const block::Currency return true; } +/** + * Checks the validity of the McBlockExtra in a masterchain block. + * Used in masterchain validation. + * + * @returns True if the data is valid, false otherwise. + */ bool ValidateQuery::check_mc_block_extra() { if (!is_masterchain()) { return true; @@ -5592,6 +6285,11 @@ bool ValidateQuery::check_mc_block_extra() { return true; } +/** + * Validates the value flow of a block. + * + * @returns True if the value flow is valid, False otherwise. 
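+ *
+ * The core of the check is a balance equation over the block's fee components: the body below
+ * computes expected_fees = fees_imported + created + transaction_fees + import_fees - fees_burned
+ * and checks it for consistency with the declared value flow. A minimal arithmetic sketch with
+ * hypothetical plain integers (the real code uses block::CurrencyCollection):
+ *
+ * @code
+ *   #include <cstdint>
+ *
+ *   // Assumes fees_burned does not exceed the sum of the other components.
+ *   bool fees_consistent(std::uint64_t fees_imported, std::uint64_t created,
+ *                        std::uint64_t transaction_fees, std::uint64_t import_fees,
+ *                        std::uint64_t fees_burned, std::uint64_t declared_fees) {
+ *     std::uint64_t expected = fees_imported + created + transaction_fees + import_fees - fees_burned;
+ *     return expected == declared_fees;
+ *   }
+ * @endcode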
+ */ bool ValidateQuery::postcheck_value_flow() { auto expected_fees = value_flow_.fees_imported + value_flow_.created + transaction_fees_ + import_fees_ - fees_burned_; @@ -5630,19 +6328,18 @@ Ref ValidateQuery::get_virt_state_root(td::Bits256 block_root_hash) { return vm::MerkleProof::virtualize_raw(upd_cs.prefetch_ref(1), {0, 1}); } -/* - * - * MAIN VALIDATOR FUNCTION - * (invokes other methods in a suitable order) +/** + * MAIN VALIDATOR FUNCTION (invokes other methods in a suitable order). * + * @returns True if the validation is successful, False otherwise. */ - bool ValidateQuery::try_validate() { if (pending) { return true; } try { if (!stage_) { + LOG(WARNING) << "try_validate stage 0"; if (!compute_prev_state()) { return fatal_error(-666, "cannot compute previous state"); } @@ -5672,6 +6369,7 @@ bool ValidateQuery::try_validate() { return true; } } + LOG(WARNING) << "try_validate stage 1"; LOG(INFO) << "running automated validity checks for block candidate " << id_.to_str(); if (!block::gen::t_Block.validate_ref(10000000, block_root_)) { return reject_query("block "s + id_.to_str() + " failed to pass automated validity checks"); @@ -5742,6 +6440,11 @@ bool ValidateQuery::try_validate() { return save_candidate(); } +/** + * Saves the candidate to disk. + * + * @returns True. + */ bool ValidateQuery::save_candidate() { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this)](td::Result R) { if (R.is_error()) { @@ -5755,6 +6458,10 @@ bool ValidateQuery::save_candidate() { return true; } +/** + * Callback function called after saving block candidate. + * Finishes validation. + */ void ValidateQuery::written_candidate() { finish_query(); } diff --git a/validator/impl/validate-query.hpp b/validator/impl/validate-query.hpp index eb052886..3e48ed1a 100644 --- a/validator/impl/validate-query.hpp +++ b/validator/impl/validate-query.hpp @@ -195,6 +195,7 @@ class ValidateQuery : public td::actor::Actor { ton::LogicalTime prev_key_block_lt_; std::unique_ptr block_limits_; std::unique_ptr block_limit_status_; + td::uint64 total_gas_used_{0}, total_special_gas_used_{0}; LogicalTime start_lt_, end_lt_; UnixTime prev_now_{~0u}, now_{~0u}; @@ -344,8 +345,7 @@ class ValidateQuery : public td::actor::Actor { td::Bits256& msg_hash); bool check_in_queue(); bool check_delivered_dequeued(); - std::unique_ptr make_account_from(td::ConstBitPtr addr, Ref account, - Ref extra); + std::unique_ptr make_account_from(td::ConstBitPtr addr, Ref account); std::unique_ptr unpack_account(td::ConstBitPtr addr); bool check_one_transaction(block::Account& account, LogicalTime lt, Ref trans_root, bool is_first, bool is_last); diff --git a/validator/interfaces/validator-manager.h b/validator/interfaces/validator-manager.h index f63e453d..7762fc4f 100644 --- a/validator/interfaces/validator-manager.h +++ b/validator/interfaces/validator-manager.h @@ -170,6 +170,14 @@ class ValidatorManager : public ValidatorManagerInterface { virtual void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) = 0; + virtual void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) = 0; + virtual void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) = 0; + virtual void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) = 0; + virtual void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) = 0; + 
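+  // The *_for_litequery accessors above mirror the corresponding *_from_db getters, but they
+  // additionally check that the returned handle refers to an applied block and convert failures
+  // into "not ready" / "out of sync" errors for liteserver clients
+  // (see ValidatorManagerImpl in validator/manager.cpp).
+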
virtual void validated_new_block(BlockIdExt block_id) = 0; virtual void add_persistent_state_description(td::Ref desc) = 0; diff --git a/validator/manager-disk.cpp b/validator/manager-disk.cpp index 15954e47..2d4b1ca3 100644 --- a/validator/manager-disk.cpp +++ b/validator/manager-disk.cpp @@ -901,7 +901,7 @@ void ValidatorManagerImpl::send_top_shard_block_description(td::Ref R) { R.ensure(); diff --git a/validator/manager-disk.hpp b/validator/manager-disk.hpp index 53c399ed..add71ca5 100644 --- a/validator/manager-disk.hpp +++ b/validator/manager-disk.hpp @@ -23,6 +23,7 @@ #include "validator-group.hpp" #include "manager-init.h" #include "manager-disk.h" +#include "queue-size-counter.hpp" #include #include @@ -385,6 +386,28 @@ class ValidatorManagerImpl : public ValidatorManager { void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) override { UNREACHABLE(); } + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + if (queue_size_counter_.empty()) { + queue_size_counter_ = + td::actor::create_actor("queuesizecounter", td::Ref{}, actor_id(this)); + } + td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); + } + void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) override { + get_block_handle(block_id, false, promise.wrap([](BlockHandle &&handle) -> ConstBlockHandle { return handle; })); + } + void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) override { + get_block_by_lt_from_db(account, lt, std::move(promise)); + } + void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) override { + get_block_by_unix_time_from_db(account, ts, std::move(promise)); + } + void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) override { + get_block_by_seqno_from_db(account, seqno, std::move(promise)); + } void validated_new_block(BlockIdExt block_id) override { } void add_persistent_state_description(td::Ref desc) override { @@ -422,6 +445,7 @@ class ValidatorManagerImpl : public ValidatorManager { int pending_new_shard_block_descr_{0}; std::vector>>> waiting_new_shard_block_descr_; + td::actor::ActorOwn queue_size_counter_; void update_shards(); void update_shard_blocks(); diff --git a/validator/manager-hardfork.cpp b/validator/manager-hardfork.cpp index d1be057f..865292c3 100644 --- a/validator/manager-hardfork.cpp +++ b/validator/manager-hardfork.cpp @@ -549,7 +549,7 @@ void ValidatorManagerImpl::register_block_handle(BlockHandle handle, td::Promise } void ValidatorManagerImpl::start_up() { - db_ = create_db_actor(actor_id(this), db_root_); + db_ = create_db_actor(actor_id(this), db_root_, opts_); } void ValidatorManagerImpl::try_get_static_file(FileHash file_hash, td::Promise promise) { diff --git a/validator/manager-hardfork.hpp b/validator/manager-hardfork.hpp index be8e0824..43c979eb 100644 --- a/validator/manager-hardfork.hpp +++ b/validator/manager-hardfork.hpp @@ -23,6 +23,7 @@ #include "validator-group.hpp" #include "manager-init.h" #include "manager-hardfork.h" +#include "queue-size-counter.hpp" #include #include @@ -447,6 +448,28 @@ class ValidatorManagerImpl : public ValidatorManager { void log_validator_session_stats(BlockIdExt block_id, validatorsession::ValidatorSessionStats stats) override { UNREACHABLE(); } + void get_out_msg_queue_size(BlockIdExt 
block_id, td::Promise promise) override { + if (queue_size_counter_.empty()) { + queue_size_counter_ = + td::actor::create_actor("queuesizecounter", td::Ref{}, actor_id(this)); + } + td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); + } + void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) override { + get_block_handle(block_id, false, promise.wrap([](BlockHandle &&handle) -> ConstBlockHandle { return handle; })); + } + void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) override { + get_block_by_lt_from_db(account, lt, std::move(promise)); + } + void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) override { + get_block_by_unix_time_from_db(account, ts, std::move(promise)); + } + void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) override { + get_block_by_seqno_from_db(account, seqno, std::move(promise)); + } void validated_new_block(BlockIdExt block_id) override { } void add_persistent_state_description(td::Ref desc) override { @@ -473,6 +496,7 @@ class ValidatorManagerImpl : public ValidatorManager { std::string db_root_; ShardIdFull shard_to_generate_; BlockIdExt block_to_generate_; + td::actor::ActorOwn queue_size_counter_; }; } // namespace validator diff --git a/validator/manager.cpp b/validator/manager.cpp index 5b5ad43e..48f1cbdf 100644 --- a/validator/manager.cpp +++ b/validator/manager.cpp @@ -629,6 +629,12 @@ void ValidatorManagerImpl::wait_block_state(BlockHandle handle, td::uint32 prior return promise.set_error( td::Status::Error(PSTRING() << "not monitoring shard " << handle->id().shard_full().to_str())); } + auto it0 = block_state_cache_.find(handle->id()); + if (it0 != block_state_cache_.end()) { + it0->second.ttl_ = td::Timestamp::in(30.0); + promise.set_result(it0->second.state_); + return; + } auto it = wait_state_.find(handle->id()); if (it == wait_state_.end()) { auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result> R) { @@ -889,6 +895,8 @@ void ValidatorManagerImpl::wait_block_message_queue_short(BlockIdExt block_id, t void ValidatorManagerImpl::get_external_messages(ShardIdFull shard, td::Promise>> promise) { + td::Timer t; + size_t processed = 0, deleted = 0; std::vector> res; MessageId left{AccountIdPrefixFull{shard.workchain, shard.shard & (shard.shard - 1)}, Bits256::zero()}; auto it = ext_messages_.lower_bound(left); @@ -897,10 +905,12 @@ void ValidatorManagerImpl::get_external_messages(ShardIdFull shard, if (!shard_contains(shard, s.dst)) { break; } + ++processed; if (it->second->expired()) { ext_addr_messages_[it->second->address()].erase(it->first.hash); ext_messages_hashes_.erase(it->first.hash); it = ext_messages_.erase(it); + ++deleted; continue; } if (it->second->is_active()) { @@ -908,6 +918,9 @@ void ValidatorManagerImpl::get_external_messages(ShardIdFull shard, } it++; } + LOG(WARNING) << "get_external_messages to shard " << shard.to_str() << " : time=" << t.elapsed() + << " result_size=" << res.size() << " processed=" << processed << " expired=" << deleted + << " total_size=" << ext_messages_.size(); promise.set_value(std::move(res)); } @@ -1102,6 +1115,9 @@ void ValidatorManagerImpl::get_block_by_seqno_from_db(AccountIdPrefixFull accoun } void ValidatorManagerImpl::finished_wait_state(BlockHandle handle, td::Result> R) { + if (R.is_ok()) { + 
block_state_cache_[handle->id()] = {R.ok(), td::Timestamp::in(30.0)}; + } auto it = wait_state_.find(handle->id()); if (it != wait_state_.end()) { if (R.is_error()) { @@ -1461,7 +1477,18 @@ td::Ref ValidatorManagerImpl::do_get_last_liteserver_state() { if (last_masterchain_state_.is_null()) { return {}; } - if (last_liteserver_state_.is_null() || last_liteserver_state_->get_unix_time() < td::Clocks::system() - 30) { + if (last_liteserver_state_.is_null()) { + last_liteserver_state_ = last_masterchain_state_; + return last_liteserver_state_; + } + if (last_liteserver_state_->get_seqno() == last_masterchain_state_->get_seqno()) { + return last_liteserver_state_; + } + // If liteserver seqno (i.e. shard client) lags then use last masterchain state for liteserver + // Allowed lag depends on the block rate + double time_per_block = double(last_masterchain_state_->get_unix_time() - last_liteserver_state_->get_unix_time()) / + double(last_masterchain_state_->get_seqno() - last_liteserver_state_->get_seqno()); + if (td::Clocks::system() - double(last_liteserver_state_->get_unix_time()) > std::min(time_per_block * 8, 180.0)) { last_liteserver_state_ = last_masterchain_state_; } return last_liteserver_state_; @@ -1562,7 +1589,7 @@ void ValidatorManagerImpl::send_get_out_msg_queue_proof_request( } void ValidatorManagerImpl::start_up() { - db_ = create_db_actor(actor_id(this), db_root_); + db_ = create_db_actor(actor_id(this), db_root_, opts_); lite_server_cache_ = create_liteserver_cache_actor(actor_id(this), db_root_); token_manager_ = td::actor::create_actor("tokenmanager"); td::mkdir(db_root_ + "/tmp/").ensure(); @@ -2448,7 +2475,15 @@ void ValidatorManagerImpl::allow_block_info_gc(BlockIdExt block_id, td::Promise< void ValidatorManagerImpl::got_next_gc_masterchain_handle(BlockHandle handle) { CHECK(gc_advancing_); auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), handle](td::Result> R) { - R.ensure(); + if (R.is_error()) { + if (R.error().code() == ErrorCode::timeout) { + LOG(ERROR) << "Failed to get gc masterchain state, retrying: " << R.move_as_error(); + td::actor::send_closure(SelfId, &ValidatorManagerImpl::got_next_gc_masterchain_handle, std::move(handle)); + } else { + LOG(FATAL) << "Failed to get gc masterchain state: " << R.move_as_error(); + } + return; + } td::actor::send_closure(SelfId, &ValidatorManagerImpl::got_next_gc_masterchain_state, std::move(handle), td::Ref{R.move_as_ok()}); }); @@ -2479,8 +2514,11 @@ void ValidatorManagerImpl::update_shard_client_block_handle(BlockHandle handle, td::Promise promise) { shard_client_handle_ = std::move(handle); auto seqno = shard_client_handle_->id().seqno(); - if (last_liteserver_state_.is_null() || last_liteserver_state_->get_block_id().seqno() < seqno) { - last_liteserver_state_ = std::move(state); + if (state.not_null()) { + shard_client_shards_ = state->get_shards(); + if (last_liteserver_state_.is_null() || last_liteserver_state_->get_block_id().seqno() < seqno) { + last_liteserver_state_ = std::move(state); + } } shard_client_update(seqno); promise.set_value(td::Unit()); @@ -2519,15 +2557,15 @@ void ValidatorManagerImpl::alarm() { } if (log_status_at_.is_in_past()) { if (last_masterchain_block_handle_) { - LOG(INFO) << "STATUS: last_masterchain_block_ago=" - << td::format::as_time(td::Clocks::system() - last_masterchain_block_handle_->unix_time()) - << " last_known_key_block_ago=" - << td::format::as_time(td::Clocks::system() - (last_known_key_block_handle_->inited_unix_time() - ? 
last_known_key_block_handle_->unix_time() - : 0)) - << " shard_client_ago=" - << td::format::as_time(td::Clocks::system() - - (shard_client_handle_ ? shard_client_handle_->unix_time() : 0)); + LOG(ERROR) << "STATUS: last_masterchain_block_ago=" + << td::format::as_time(td::Clocks::system() - last_masterchain_block_handle_->unix_time()) + << " last_known_key_block_ago=" + << td::format::as_time(td::Clocks::system() - (last_known_key_block_handle_->inited_unix_time() + ? last_known_key_block_handle_->unix_time() + : 0)) + << " shard_client_ago=" + << td::format::as_time(td::Clocks::system() - + (shard_client_handle_ ? shard_client_handle_->unix_time() : 0)); } log_status_at_ = td::Timestamp::in(60.0); } @@ -2553,6 +2591,31 @@ void ValidatorManagerImpl::alarm() { for (auto &w : shard_client_waiters_) { w.second.check_timers(); } + for (auto it = block_state_cache_.begin(); it != block_state_cache_.end();) { + bool del = it->second.ttl_.is_in_past(); + if (del) { + auto block_id = it->first; + if (block_id.is_masterchain()) { + if (block_id.seqno() == last_masterchain_seqno_) { + it->second.ttl_ = td::Timestamp::in(30.0); + del = false; + } + } else if (last_masterchain_state_.not_null()) { + auto shard = last_masterchain_state_->get_shard_from_config(block_id.shard_full()); + if (shard.not_null()) { + if (block_id.seqno() == shard->top_block_id().seqno()) { + it->second.ttl_ = td::Timestamp::in(30.0); + del = false; + } + } + } + } + if (del) { + it = block_state_cache_.erase(it); + } else { + ++it; + } + } } alarm_timestamp().relax(check_waiters_at_); if (check_shard_clients_.is_in_past()) { @@ -2577,8 +2640,8 @@ void ValidatorManagerImpl::update_shard_client_state(BlockIdExt masterchain_bloc } void ValidatorManagerImpl::get_shard_client_state(bool from_db, td::Promise promise) { - if (!shard_client_.empty() && !from_db) { - td::actor::send_closure(shard_client_, &ShardClient::get_processed_masterchain_block_id, std::move(promise)); + if (shard_client_handle_ && !from_db) { + promise.set_result(shard_client_handle_->id()); } else { td::actor::send_closure(db_, &Db::get_shard_client_state, std::move(promise)); } @@ -2776,18 +2839,19 @@ void ValidatorManagerImpl::log_validator_session_stats(BlockIdExt block_id, } std::vector> rounds; - for (const auto& round : stats.rounds) { + for (const auto &round : stats.rounds) { std::vector> producers; - for (const auto& producer : round.producers) { + for (const auto &producer : round.producers) { producers.push_back(create_tl_object( - producer.id.bits256_value(), producer.block_status, producer.block_timestamp)); + producer.id.bits256_value(), producer.candidate_id, producer.block_status, producer.block_timestamp, + producer.comment)); } rounds.push_back(create_tl_object(round.timestamp, std::move(producers))); } auto obj = create_tl_object( - create_tl_block_id_simple(block_id.id), stats.timestamp, stats.self.bits256_value(), - stats.creator.bits256_value(), stats.total_validators, stats.total_weight, stats.signatures, + stats.success, create_tl_block_id(block_id), stats.timestamp, stats.self.bits256_value(), stats.session_id, + stats.cc_seqno, stats.creator.bits256_value(), stats.total_validators, stats.total_weight, stats.signatures, stats.signatures_weight, stats.approve_signatures, stats.approve_signatures_weight, stats.first_round, std::move(rounds)); std::string s = td::json_encode(td::ToJson(*obj), false); @@ -2801,6 +2865,143 @@ void ValidatorManagerImpl::log_validator_session_stats(BlockIdExt block_id, LOG(INFO) << "Writing validator session 
stats for " << block_id.id; } +void ValidatorManagerImpl::get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) { + get_block_handle( + block_id, false, + [SelfId = actor_id(this), block_id, promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_block_handle_for_litequery_error, block_id, + std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) { + get_block_by_lt_from_db( + account, lt, [=, SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_lookup_block_for_litequery_error, account, 0, + lt, std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) { + get_block_by_unix_time_from_db( + account, ts, [=, SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_lookup_block_for_litequery_error, account, 1, + ts, std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) { + get_block_by_seqno_from_db( + account, seqno, + [=, SelfId = actor_id(this), promise = std::move(promise)](td::Result R) mutable { + if (R.is_ok() && R.ok()->is_applied()) { + promise.set_value(R.move_as_ok()); + } else { + td::actor::send_closure(SelfId, &ValidatorManagerImpl::process_lookup_block_for_litequery_error, account, 2, + seqno, std::move(R), std::move(promise)); + } + }); +} + +void ValidatorManagerImpl::process_block_handle_for_litequery_error(BlockIdExt block_id, + td::Result r_handle, + td::Promise promise) { + td::Status err; + if (r_handle.is_error()) { + err = r_handle.move_as_error(); + } else { + auto handle = r_handle.move_as_ok(); + if (handle->is_applied()) { + promise.set_value(std::move(handle)); + return; + } + if (!handle->received() || !handle->received_state()) { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << block_id.id.to_str() << " is not in db"); + } else { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << block_id.id.to_str() << " is not applied"); + } + } + if (block_id.is_masterchain()) { + if (block_id.seqno() > last_masterchain_seqno_) { + err = err.move_as_error_suffix(PSTRING() << " (last known masterchain block: " << last_masterchain_seqno_ << ")"); + } + } else { + for (auto &shard : shard_client_shards_) { + if (shard_intersects(shard->shard(), block_id.shard_full())) { + if (block_id.seqno() > shard->top_block_id().seqno()) { + err = err.move_as_error_suffix( + PSTRING() << " (possibly out of sync: shard_client_seqno=" + << (shard_client_handle_ ? shard_client_handle_->id().seqno() : 0) << " ls_seqno=" + << (last_liteserver_state_.not_null() ? 
last_liteserver_state_->get_seqno() : 0) << ")"); + } + break; + } + } + } + promise.set_error(std::move(err)); +} + +void ValidatorManagerImpl::process_lookup_block_for_litequery_error(AccountIdPrefixFull account, int type, + td::uint64 value, + td::Result r_handle, + td::Promise promise) { + td::Status err; + if (r_handle.is_error()) { + err = r_handle.move_as_error(); + } else { + auto handle = r_handle.move_as_ok(); + if (handle->is_applied()) { + promise.set_value(std::move(handle)); + return; + } + if (!handle->received() || !handle->received_state()) { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << handle->id().to_str() << " is not in db"); + } else { + err = td::Status::Error(ErrorCode::notready, PSTRING() << "block " << handle->id().to_str() << " is not applied"); + } + } + if (account.is_masterchain()) { + if (value > (type == 0 + ? last_masterchain_state_->get_logical_time() + : (type == 1 ? last_masterchain_state_->get_unix_time() : last_masterchain_state_->get_seqno()))) { + err = err.move_as_error_suffix(PSTRING() << " (last known masterchain block: " << last_masterchain_seqno_ << ")"); + } + } else { + for (auto &shard : shard_client_shards_) { + if (shard_intersects(shard->shard(), account.as_leaf_shard())) { + if (value > (type == 0 ? shard->end_lt() + : (type == 1 ? (shard_client_handle_ ? shard_client_handle_->unix_time() : 0) + : shard->top_block_id().seqno()))) { + err = err.move_as_error_suffix( + PSTRING() << " (possibly out of sync: shard_client_seqno=" + << (shard_client_handle_ ? shard_client_handle_->id().seqno() : 0) << " ls_seqno=" + << (last_liteserver_state_.not_null() ? last_liteserver_state_->get_seqno() : 0) << ")"); + } + break; + } + } + } + static std::string names[3] = {"lt", "utime", "seqno"}; + err = err.move_as_error_prefix(PSTRING() << "cannot find block " << account.to_str() << " " << names[type] << "=" + << value << ": "); + promise.set_error(std::move(err)); +} + void ValidatorManagerImpl::get_validator_sessions_info( td::Promise> promise) { std::vector> groups; diff --git a/validator/manager.hpp b/validator/manager.hpp index 45a0c1e6..e4485fa6 100644 --- a/validator/manager.hpp +++ b/validator/manager.hpp @@ -28,6 +28,7 @@ #include "state-serializer.hpp" #include "rldp/rldp.h" #include "token-manager.h" +#include "queue-size-counter.hpp" #include "collator-node.hpp" #include @@ -190,6 +191,12 @@ class ValidatorManagerImpl : public ValidatorManager { std::map>> wait_state_; std::map>> wait_block_data_; + struct CachedBlockState { + td::Ref state_; + td::Timestamp ttl_; + }; + std::map block_state_cache_; + struct WaitBlockHandle { std::vector> waiting_; }; @@ -262,6 +269,7 @@ class ValidatorManagerImpl : public ValidatorManager { BlockHandle last_key_block_handle_; BlockHandle last_known_key_block_handle_; BlockHandle shard_client_handle_; + std::vector> shard_client_shards_; td::Ref last_liteserver_state_; td::Ref do_get_last_liteserver_state(); @@ -582,6 +590,31 @@ class ValidatorManagerImpl : public ValidatorManager { void del_collator(adnl::AdnlNodeIdShort id, ShardIdFull shard) override; void update_options(td::Ref opts) override; + void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) override { + if (queue_size_counter_.empty()) { + if (last_masterchain_state_.is_null()) { + promise.set_error(td::Status::Error(ErrorCode::notready, "not ready")); + return; + } + queue_size_counter_ = td::actor::create_actor("queuesizecounter", + last_masterchain_state_, actor_id(this)); + } + 
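+    // Delegate to the QueueSizeCounter actor: it caches per-block results and, where possible,
+    // derives a block's queue size incrementally from its predecessor instead of rescanning the
+    // whole OutMsgQueue (see validator/queue-size-counter.cpp).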
td::actor::send_closure(queue_size_counter_, &QueueSizeCounter::get_queue_size, block_id, std::move(promise)); + } + + void get_block_handle_for_litequery(BlockIdExt block_id, td::Promise promise) override; + void get_block_by_lt_from_db_for_litequery(AccountIdPrefixFull account, LogicalTime lt, + td::Promise promise) override; + void get_block_by_unix_time_from_db_for_litequery(AccountIdPrefixFull account, UnixTime ts, + td::Promise promise) override; + void get_block_by_seqno_from_db_for_litequery(AccountIdPrefixFull account, BlockSeqno seqno, + td::Promise promise) override; + void process_block_handle_for_litequery_error(BlockIdExt block_id, td::Result r_handle, + td::Promise promise); + void process_lookup_block_for_litequery_error(AccountIdPrefixFull account, int type, td::uint64 value, + td::Result r_handle, + td::Promise promise); + private: td::Timestamp resend_shard_blocks_at_; td::Timestamp check_waiters_at_; @@ -655,6 +688,7 @@ class ValidatorManagerImpl : public ValidatorManager { } std::map> shard_client_waiters_; + td::actor::ActorOwn queue_size_counter_; struct Collator { td::actor::ActorOwn actor; diff --git a/validator/queue-size-counter.cpp b/validator/queue-size-counter.cpp new file mode 100644 index 00000000..4780f202 --- /dev/null +++ b/validator/queue-size-counter.cpp @@ -0,0 +1,301 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#include "queue-size-counter.hpp" +#include "block/block-auto.h" +#include "block/block-parse.h" +#include "common/delay.h" +#include "td/actor/MultiPromise.h" +#include "td/utils/Random.h" + +namespace ton::validator { + +static td::Result calc_queue_size(const td::Ref &state) { + td::uint32 size = 0; + TRY_RESULT(outq_descr, state->message_queue()); + block::gen::OutMsgQueueInfo::Record qinfo; + if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) { + return td::Status::Error("invalid message queue"); + } + vm::AugmentedDictionary queue{qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; + bool ok = queue.check_for_each([&](td::Ref, td::ConstBitPtr, int) -> bool { + ++size; + return true; + }); + if (!ok) { + return td::Status::Error("invalid message queue dict"); + } + return size; +} + +static td::Result recalc_queue_size(const td::Ref &state, const td::Ref &prev_state, + td::uint32 prev_size) { + TRY_RESULT(outq_descr, state->message_queue()); + block::gen::OutMsgQueueInfo::Record qinfo; + if (!tlb::unpack_cell(outq_descr->root_cell(), qinfo)) { + return td::Status::Error("invalid message queue"); + } + vm::AugmentedDictionary queue{qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; + + TRY_RESULT(prev_outq_descr, prev_state->message_queue()); + block::gen::OutMsgQueueInfo::Record prev_qinfo; + if (!tlb::unpack_cell(prev_outq_descr->root_cell(), prev_qinfo)) { + return td::Status::Error("invalid message queue"); + } + vm::AugmentedDictionary prev_queue{prev_qinfo.out_queue->prefetch_ref(0), 352, block::tlb::aug_OutMsgQueue}; + td::uint32 add = 0, rem = 0; + bool ok = prev_queue.scan_diff( + queue, [&](td::ConstBitPtr, int, td::Ref prev_val, td::Ref new_val) -> bool { + if (prev_val.not_null()) { + ++rem; + } + if (new_val.not_null()) { + ++add; + } + return true; + }); + if (!ok) { + return td::Status::Error("invalid message queue dict"); + } + if (prev_size + add < rem) { + return td::Status::Error("negative value"); + } + return prev_size + add - rem; +} + +void QueueSizeCounter::start_up() { + if (init_masterchain_state_.is_null()) { + // Used in manager-hardfork or manager-disk + simple_mode_ = true; + return; + } + current_seqno_ = init_masterchain_state_->get_seqno(); + process_top_shard_blocks_cont(init_masterchain_state_, true); + init_masterchain_state_ = {}; + alarm(); +} + +void QueueSizeCounter::get_queue_size(BlockIdExt block_id, td::Promise promise) { + get_queue_size_ex(block_id, simple_mode_ || is_block_too_old(block_id), std::move(promise)); +} + +void QueueSizeCounter::get_queue_size_ex(ton::BlockIdExt block_id, bool calc_whole, td::Promise promise) { + Entry &entry = results_[block_id]; + if (entry.done_) { + promise.set_result(entry.queue_size_); + return; + } + entry.promises_.push_back(std::move(promise)); + if (entry.started_) { + return; + } + entry.started_ = true; + entry.calc_whole_ = calc_whole; + td::actor::send_closure(manager_, &ValidatorManager::get_block_handle, block_id, true, + [SelfId = actor_id(this), block_id, manager = manager_](td::Result R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, block_id, R.move_as_error()); + return; + } + BlockHandle handle = R.move_as_ok(); + td::actor::send_closure( + manager, &ValidatorManager::wait_block_state, handle, 0, td::Timestamp::in(10.0), + [SelfId, handle](td::Result> R) mutable { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, handle->id(), + R.move_as_error()); + return; + } + 
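+                          // Block state loaded successfully: continue in get_queue_size_cont,
+                          // which either recounts the whole queue or recomputes the size
+                          // incrementally from the previous block's result.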
td::actor::send_closure(SelfId, &QueueSizeCounter::get_queue_size_cont, + std::move(handle), R.move_as_ok()); + }); + }); +} + +void QueueSizeCounter::get_queue_size_cont(BlockHandle handle, td::Ref state) { + Entry &entry = results_[handle->id()]; + CHECK(entry.started_); + bool calc_whole = entry.calc_whole_ || handle->id().seqno() == 0; + if (!calc_whole) { + CHECK(handle->inited_prev()); + auto prev_blocks = handle->prev(); + bool after_split = prev_blocks.size() == 1 && handle->id().shard_full() != prev_blocks[0].shard_full(); + bool after_merge = prev_blocks.size() == 2; + calc_whole = after_split || after_merge; + } + if (calc_whole) { + auto r_size = calc_queue_size(state); + if (r_size.is_error()) { + on_error(handle->id(), r_size.move_as_error()); + return; + } + entry.done_ = true; + entry.queue_size_ = r_size.move_as_ok(); + for (auto &promise : entry.promises_) { + promise.set_result(entry.queue_size_); + } + entry.promises_.clear(); + return; + } + + auto prev_block_id = handle->one_prev(true); + get_queue_size(prev_block_id, [=, SelfId = actor_id(this), manager = manager_](td::Result R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, state->get_block_id(), R.move_as_error()); + return; + } + td::uint32 prev_size = R.move_as_ok(); + td::actor::send_closure( + manager, &ValidatorManager::wait_block_state_short, prev_block_id, 0, td::Timestamp::in(10.0), + [=](td::Result> R) { + if (R.is_error()) { + td::actor::send_closure(SelfId, &QueueSizeCounter::on_error, state->get_block_id(), R.move_as_error()); + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::get_queue_size_cont2, state, R.move_as_ok(), prev_size); + }); + }); +} + +void QueueSizeCounter::get_queue_size_cont2(td::Ref state, td::Ref prev_state, + td::uint32 prev_size) { + BlockIdExt block_id = state->get_block_id(); + Entry &entry = results_[block_id]; + CHECK(entry.started_); + auto r_size = recalc_queue_size(state, prev_state, prev_size); + if (r_size.is_error()) { + on_error(block_id, r_size.move_as_error()); + return; + } + entry.done_ = true; + entry.queue_size_ = r_size.move_as_ok(); + for (auto &promise : entry.promises_) { + promise.set_result(entry.queue_size_); + } + entry.promises_.clear(); +} + +void QueueSizeCounter::on_error(ton::BlockIdExt block_id, td::Status error) { + auto it = results_.find(block_id); + if (it == results_.end()) { + return; + } + Entry &entry = it->second; + CHECK(!entry.done_); + for (auto &promise : entry.promises_) { + promise.set_error(error.clone()); + } + results_.erase(it); +} + +void QueueSizeCounter::process_top_shard_blocks() { + LOG(DEBUG) << "QueueSizeCounter::process_top_shard_blocks seqno=" << current_seqno_; + td::actor::send_closure( + manager_, &ValidatorManager::get_block_by_seqno_from_db, AccountIdPrefixFull{masterchainId, 0}, current_seqno_, + [SelfId = actor_id(this), manager = manager_](td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get masterchain block id: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks); }, + td::Timestamp::in(5.0)); + return; + } + td::actor::send_closure( + manager, &ValidatorManager::wait_block_state_short, R.ok()->id(), 0, td::Timestamp::in(10.0), + [=](td::Result> R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get masterchain state: " << R.move_as_error(); + delay_action([=]() { td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks); }, + 
td::Timestamp::in(5.0)); + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks_cont, + td::Ref(R.move_as_ok()), false); + }); + }); +} + +void QueueSizeCounter::process_top_shard_blocks_cont(td::Ref state, bool init) { + LOG(DEBUG) << "QueueSizeCounter::process_top_shard_blocks_cont seqno=" << current_seqno_ << " init=" << init; + td::MultiPromise mp; + auto ig = mp.init_guard(); + last_top_blocks_.clear(); + last_top_blocks_.push_back(state->get_block_id()); + for (auto &shard : state->get_shards()) { + last_top_blocks_.push_back(shard->top_block_id()); + } + for (const BlockIdExt &block_id : last_top_blocks_) { + get_queue_size_ex_retry(block_id, init, ig.get_promise()); + } + ig.add_promise([SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks_finish); + }); + if (init) { + init_top_blocks_ = last_top_blocks_; + } +} + +void QueueSizeCounter::get_queue_size_ex_retry(BlockIdExt block_id, bool calc_whole, td::Promise promise) { + get_queue_size_ex(block_id, calc_whole, + [=, promise = std::move(promise), SelfId = actor_id(this)](td::Result R) mutable { + if (R.is_error()) { + LOG(WARNING) << "Failed to calculate queue size for block " << block_id.to_str() << ": " + << R.move_as_error(); + delay_action( + [=, promise = std::move(promise)]() mutable { + td::actor::send_closure(SelfId, &QueueSizeCounter::get_queue_size_ex_retry, block_id, + calc_whole, std::move(promise)); + }, + td::Timestamp::in(5.0)); + return; + } + promise.set_result(td::Unit()); + }); +} + +void QueueSizeCounter::process_top_shard_blocks_finish() { + ++current_seqno_; + wait_shard_client(); +} + +void QueueSizeCounter::wait_shard_client() { + LOG(DEBUG) << "QueueSizeCounter::wait_shard_client seqno=" << current_seqno_; + td::actor::send_closure( + manager_, &ValidatorManager::wait_shard_client_state, current_seqno_, td::Timestamp::in(60.0), + [SelfId = actor_id(this)](td::Result R) { + if (R.is_error()) { + delay_action([=]() mutable { td::actor::send_closure(SelfId, &QueueSizeCounter::wait_shard_client); }, + td::Timestamp::in(5.0)); + return; + } + td::actor::send_closure(SelfId, &QueueSizeCounter::process_top_shard_blocks); + }); +} + +void QueueSizeCounter::alarm() { + for (auto it = results_.begin(); it != results_.end();) { + if (it->second.done_ && is_block_too_old(it->first)) { + it = results_.erase(it); + } else { + ++it; + } + } + alarm_timestamp() = td::Timestamp::in(td::Random::fast(20.0, 40.0)); +} + +} // namespace ton::validator \ No newline at end of file diff --git a/validator/queue-size-counter.hpp b/validator/queue-size-counter.hpp new file mode 100644 index 00000000..fabb0cec --- /dev/null +++ b/validator/queue-size-counter.hpp @@ -0,0 +1,82 @@ +/* + This file is part of TON Blockchain Library. + + TON Blockchain Library is free software: you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as published by + the Free Software Foundation, either version 2 of the License, or + (at your option) any later version. + + TON Blockchain Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public License + along with TON Blockchain Library. If not, see . 
+*/ +#pragma once +#include "interfaces/validator-manager.h" + +namespace ton::validator { + +class QueueSizeCounter : public td::actor::Actor { + public: + QueueSizeCounter(td::Ref last_masterchain_state, td::actor::ActorId manager) + : init_masterchain_state_(last_masterchain_state), manager_(std::move(manager)) { + } + + void start_up() override; + void get_queue_size(BlockIdExt block_id, td::Promise promise); + void alarm() override; + + private: + td::Ref init_masterchain_state_; + td::actor::ActorId manager_; + bool simple_mode_ = false; + + BlockSeqno current_seqno_ = 0; + std::vector init_top_blocks_; + std::vector last_top_blocks_; + + struct Entry { + bool started_ = false; + bool done_ = false; + bool calc_whole_ = false; + td::uint32 queue_size_ = 0; + std::vector> promises_; + }; + std::map results_; + + void get_queue_size_ex(BlockIdExt block_id, bool calc_whole, td::Promise promise); + void get_queue_size_cont(BlockHandle handle, td::Ref state); + void get_queue_size_cont2(td::Ref state, td::Ref prev_state, td::uint32 prev_size); + void on_error(BlockIdExt block_id, td::Status error); + + void process_top_shard_blocks(); + void process_top_shard_blocks_cont(td::Ref state, bool init = false); + void get_queue_size_ex_retry(BlockIdExt block_id, bool calc_whole, td::Promise promise); + void process_top_shard_blocks_finish(); + void wait_shard_client(); + + bool is_block_too_old(const BlockIdExt& block_id) const { + for (const BlockIdExt& top_block : last_top_blocks_) { + if (shard_intersects(block_id.shard_full(), top_block.shard_full())) { + if (block_id.seqno() + 100 < top_block.seqno()) { + return true; + } + break; + } + } + for (const BlockIdExt& init_top_block : init_top_blocks_) { + if (shard_intersects(block_id.shard_full(), init_top_block.shard_full())) { + if (block_id.seqno() < init_top_block.seqno()) { + return true; + } + break; + } + } + return false; + } +}; + +} // namespace ton::validator diff --git a/validator/validator-group.cpp b/validator/validator-group.cpp index 5c7bef5b..39bc46de 100644 --- a/validator/validator-group.cpp +++ b/validator/validator-group.cpp @@ -88,10 +88,6 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidat return; } - if (approved_candidates_cache_round_ != round_id) { - approved_candidates_cache_round_ = round_id; - approved_candidates_cache_.clear(); - } auto next_block_id = create_next_block_id(block.id.root_hash, block.id.file_hash); block.id = next_block_id; @@ -119,7 +115,7 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidat auto v = R.move_as_ok(); v.visit(td::overloaded( [&](UnixTime ts) { - td::actor::send_closure(SelfId, &ValidatorGroup::update_approve_cache, round_id, block_to_cache_key(block), + td::actor::send_closure(SelfId, &ValidatorGroup::update_approve_cache, block_to_cache_key(block), ts); promise.set_result(ts); }, @@ -133,17 +129,14 @@ void ValidatorGroup::validate_block_candidate(td::uint32 round_id, BlockCandidat P.set_error(td::Status::Error(ErrorCode::notready, "validator group not started")); return; } - VLOG(VALIDATOR_DEBUG) << "validating block candidate " << next_block_id.to_str(); + VLOG(VALIDATOR_DEBUG) << "validating block candidate " << next_block_id; block.id = next_block_id; run_validate_query(shard_, min_masterchain_block_id_, prev_block_ids_, std::move(block), validator_set_, manager_, td::Timestamp::in(15.0), std::move(P), collator_config_.full_collated_data ? 
ValidateMode::full_collated_data : 0); } -void ValidatorGroup::update_approve_cache(td::uint32 round_id, CacheKey key, UnixTime value) { - if (approved_candidates_cache_round_ != round_id) { - return; - } +void ValidatorGroup::update_approve_cache(CacheKey key, UnixTime value) { approved_candidates_cache_[key] = value; } @@ -153,6 +146,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s std::vector approve_signatures, validatorsession::ValidatorSessionStats stats, td::Promise promise) { + stats.cc_seqno = validator_set_->get_catchain_seqno(); if (round_id >= last_known_round_id_) { last_known_round_id_ = round_id + 1; } @@ -167,6 +161,7 @@ void ValidatorGroup::accept_block_candidate(td::uint32 round_id, PublicKeyHash s return; } auto next_block_id = create_next_block_id(root_hash, file_hash); + LOG(WARNING) << "Accepted block " << next_block_id; td::actor::send_closure(manager_, &ValidatorManager::log_validator_session_stats, next_block_id, std::move(stats)); auto block = block_data.size() > 0 ? create_block(next_block_id, std::move(block_data)).move_as_ok() : td::Ref{}; @@ -190,8 +185,30 @@ void ValidatorGroup::accept_block_query(BlockIdExt block_id, td::Ref return; } LOG_CHECK(R.error().code() == ErrorCode::timeout || R.error().code() == ErrorCode::notready) << R.move_as_error(); - td::actor::send_closure(SelfId, &ValidatorGroup::accept_block_query, block_id, std::move(block), std::move(prev), - std::move(sig_set), std::move(approve_sig_set), false, std::move(promise), true); + td::actor::send_closure(SelfId, &ValidatorGroup::retry_accept_block_query, block_id, std::move(block), + std::move(prev), std::move(sig_set), std::move(approve_sig_set), std::move(promise)); + } else { + promise.set_value(R.move_as_ok()); + } + }); + + run_accept_block_query(next_block_id, std::move(block), prev_block_ids_, validator_set_, std::move(sig_set), + std::move(approve_sig_set), src == local_id_, manager_, std::move(P)); + prev_block_ids_ = std::vector{next_block_id}; + cached_collated_block_ = nullptr; + approved_candidates_cache_.clear(); +} + +void ValidatorGroup::retry_accept_block_query(BlockIdExt block_id, td::Ref block, + std::vector prev, td::Ref sig_set, + td::Ref approve_sig_set, + td::Promise promise) { + auto P = td::PromiseCreator::lambda([SelfId = actor_id(this), block_id, block, prev, sig_set, approve_sig_set, + promise = std::move(promise)](td::Result R) mutable { + if (R.is_error()) { + LOG_CHECK(R.error().code() == ErrorCode::timeout) << R.move_as_error(); + td::actor::send_closure(SelfId, &ValidatorGroup::retry_accept_block_query, block_id, std::move(block), + std::move(prev), std::move(sig_set), std::move(approve_sig_set), std::move(promise)); } else { promise.set_value(R.move_as_ok()); } @@ -343,6 +360,7 @@ void ValidatorGroup::start(std::vector prev, BlockIdExt min_masterch prev_block_ids_ = prev; min_masterchain_block_id_ = min_masterchain_block_id; cached_collated_block_ = nullptr; + approved_candidates_cache_.clear(); started_ = true; if (init_) { @@ -365,6 +383,19 @@ void ValidatorGroup::start(std::vector prev, BlockIdExt min_masterch void ValidatorGroup::destroy() { if (!session_.empty()) { + td::actor::send_closure(session_, &validatorsession::ValidatorSession::get_current_stats, + [manager = manager_, cc_seqno = validator_set_->get_catchain_seqno(), + block_id = create_next_block_id(RootHash::zero(), FileHash::zero())]( + td::Result R) { + if (R.is_error()) { + LOG(WARNING) << "Failed to get validator session stats: " << R.move_as_error(); + 
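+                             // Session stats are best-effort: a failure here only logs a warning
+                             // and skips the final stats record; group destruction proceeds.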
return; + } + auto stats = R.move_as_ok(); + stats.cc_seqno = cc_seqno; + td::actor::send_closure(manager, &ValidatorManager::log_validator_session_stats, block_id, + std::move(stats)); + }); auto ses = session_.release(); delay_action([ses]() mutable { td::actor::send_closure(ses, &validatorsession::ValidatorSession::destroy); }, td::Timestamp::in(10.0)); diff --git a/validator/validator-group.hpp b/validator/validator-group.hpp index 4c08b168..383962a7 100644 --- a/validator/validator-group.hpp +++ b/validator/validator-group.hpp @@ -141,9 +141,8 @@ class ValidatorGroup : public td::actor::Actor { typedef std::tuple CacheKey; std::map approved_candidates_cache_; - td::uint32 approved_candidates_cache_round_ = 0; - void update_approve_cache(td::uint32 round_id, CacheKey key, UnixTime value); + void update_approve_cache(CacheKey key, UnixTime value); static CacheKey block_to_cache_key(const BlockCandidate& block) { return std::make_tuple(block.pubkey.as_bits256(), block.id, sha256_bits256(block.data), block.collated_file_hash); diff --git a/validator/validator-options.hpp b/validator/validator-options.hpp index fa7eb5f9..d446c089 100644 --- a/validator/validator-options.hpp +++ b/validator/validator-options.hpp @@ -112,6 +112,9 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions { std::string get_session_logs_file() const override { return session_logs_file_; } + td::uint32 get_celldb_compress_depth() const override { + return celldb_compress_depth_; + } ValidatorMode validator_mode() const override { return validator_mode_; } @@ -168,6 +171,9 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions { void set_session_logs_file(std::string f) override { session_logs_file_ = std::move(f); } + void set_celldb_compress_depth(td::uint32 value) override { + celldb_compress_depth_ = value; + } void set_validator_mode(ValidatorMode value) override { validator_mode_ = value; } @@ -211,6 +217,7 @@ struct ValidatorManagerOptionsImpl : public ValidatorManagerOptions { BlockSeqno truncate_{0}; BlockSeqno sync_upto_{0}; std::string session_logs_file_; + td::uint32 celldb_compress_depth_{0}; ValidatorMode validator_mode_ = validator_normal; }; diff --git a/validator/validator.h b/validator/validator.h index 506950e7..64537a7a 100644 --- a/validator/validator.h +++ b/validator/validator.h @@ -81,6 +81,7 @@ struct ValidatorManagerOptions : public td::CntObject { virtual BlockSeqno get_truncate_seqno() const = 0; virtual BlockSeqno sync_upto() const = 0; virtual std::string get_session_logs_file() const = 0; + virtual td::uint32 get_celldb_compress_depth() const = 0; virtual ValidatorMode validator_mode() const = 0; virtual void set_zero_block_id(BlockIdExt block_id) = 0; @@ -100,6 +101,7 @@ struct ValidatorManagerOptions : public td::CntObject { virtual void truncate_db(BlockSeqno seqno) = 0; virtual void set_sync_upto(BlockSeqno seqno) = 0; virtual void set_session_logs_file(std::string f) = 0; + virtual void set_celldb_compress_depth(td::uint32 value) = 0; virtual void set_validator_mode(ValidatorMode value) = 0; static td::Ref create( @@ -239,6 +241,8 @@ class ValidatorManagerInterface : public td::actor::Actor { virtual void prepare_perf_timer_stats(td::Promise> promise) = 0; virtual void add_perf_timer_stat(std::string name, double duration) = 0; + virtual void get_out_msg_queue_size(BlockIdExt block_id, td::Promise promise) = 0; + virtual void get_validator_sessions_info( td::Promise> promise) = 0;